max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/language/semantic_analysis/type_builder.py | ArielTriana/battle-sim | 2 | 12769251 | <filename>src/language/semantic_analysis/type_builder.py
from .context import *
from typing import List
from ...utils.visitor import *
from ..parser.ast import *
class Type_Builder:
    """Semantic-analysis pass that fills in the attribute and method tables
    of every class type already registered in the context.

    NOTE(review): the repeated ``visit`` definitions are multimethods
    dispatched on the node's static type by the project's ``@visitor``
    decorator; they do not shadow one another.
    """
    def __init__(self, context) -> None:
        # Global semantic context holding all declared types.
        self.context = context
        # Type currently being populated; assigned when a ClassDef is visited.
        self.current_type : Type
    @visitor(BsFile)
    def visit(self, node: BsFile):
        # Entry point: build the type information for every class in the file.
        for classDef in node.classes:
            self.visit(classDef)
    @visitor(ClassDef)
    def visit(self, node: ClassDef):
        # Fetch the type object created by an earlier pass, then fill it in.
        self.current_type=self.context.get_type_object(node.name)
        for attrDef in node.attributes:
            self.visit(attrDef)
        for methodDef in node.methods:
            self.visit(methodDef)
    @visitor(AttrDef)
    def visit(self, node: AttrDef):
        # Attributes live in the enclosing type's context.
        node.context=self.current_type.context
        self.current_type.define_attribute(node.name, node.type)
    @visitor(FuncDef)
    def visit(self, node: FuncDef):
        node.context=self.current_type.context
        # Each method gets its own child context with the implicit names bound.
        node.my_context=self.current_type.define_method(node.name, node.return_type, node.arg_names, node.arg_types)
        node.my_context.define_var("self",self.current_type.name)
        # 'super' is modelled as a zero-argument function yielding the parent type.
        node.my_context.define_func("super",self.current_type.parent.name,[],[])
| 2.578125 | 3 |
mridc/collections/common/callbacks/callbacks.py | jerke123/mridc | 0 | 12769252 | # encoding: utf-8
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/callbacks/callbacks.py
import time
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_only
class LogEpochTimeCallback(Callback):
    """Simple callback that logs how long each epoch takes, in seconds, to a pytorch lightning log"""

    def __init__(self):
        """Initialize the callback and record a provisional epoch start time."""
        super().__init__()
        self.epoch_start = time.time()

    @rank_zero_only
    def on_train_epoch_start(self, trainer, pl_module):
        """Called at the start of each epoch; resets the epoch timer."""
        self.epoch_start = time.time()

    @rank_zero_only
    def on_train_epoch_end(self, trainer, pl_module):
        """Called at the end of each epoch; logs the elapsed wall-clock time."""
        elapsed = time.time() - self.epoch_start
        trainer.logger.log_metrics({"epoch_time": elapsed}, step=trainer.global_step)
| 2.5 | 2 |
src/middleware/processContactForm.py | jskrable/personal-site | 0 | 12769253 | <filename>src/middleware/processContactForm.py
#!/usr/bin/env python3
# coding: utf-8
"""
title: processContactForm.py
date: 02-18-2020
author: jskrable
description: lambda function to process contact form submissions
"""
import json
import uuid
import boto3
import datetime
def lambda_handler(event, context):
    """Entry point for contact-form submissions from the frontend.

    Parses the JSON request and stores it via update_table. Unlike the
    previous version (which always returned 200, per the inline TODO),
    storage failures are now reported to the caller as a 500.
    """
    submission = event['submission']
    try:
        update_table(submission)
    except Exception as e:
        # Surface storage failures to the frontend instead of swallowing them.
        return {
            'statusCode': 500,
            'body': json.dumps('Submission failed: {}'.format(e))}
    return {
        'statusCode': 200,
        'body': json.dumps("Successful submission")}
def update_table(submission):
    """Add a new entry to the DynamoDB contact table.

    Creates a unique SubmissionID and a SubmissionTimestamp.
    SubmissionBody is the JSON object containing the form submission.
    (Also removes dataset-extraction residue that was fused onto the
    closing parenthesis and made this function a syntax error.)
    """
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    history = dynamodb.Table('jackskrable.com-contact')
    item = {
        'SubmissionID': str(uuid.uuid4()),
        # NOTE(review): naive local time; consider a timezone-aware UTC stamp.
        'SubmissionTimestamp': str(datetime.datetime.now()),
        'SubmissionBody': submission
    }
    history.put_item(Item=item)
FigureSimulation.py | grewelle/geneDriveSchistoReview | 0 | 12769254 | <gh_stars>0
import scipy
import numpy as np
from scipy.integrate import quad
from matplotlib import pyplot as plt
import seaborn as sns; sns.set(style="white", color_codes=True)
# Global matplotlib font sizes for the publication figure.
SMALL_SIZE = 36
MEDIUM_SIZE = 48
BIGGER_SIZE = 64
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE)    # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE)   # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE)   # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
#integration function for neg binomial mating
def integrand(theta, alpha, k):
    """Integrand of the negative-binomial mating-probability integral."""
    c = np.cos(theta)
    numerator = 1.0 - c
    denominator = (1.0 + alpha * c) ** (1.0 + k)
    return numerator / denominator
def main():
    """Simulate schistosomiasis transmission while sweeping the resistant
    snail fraction rho from 0 to 1 in steps of 0.01, then plot the percent
    reduction (relative to the rho=0 baseline) of infection intensity,
    prevalence and the effective reproductive number.

    Writes GeneDriveReviewFigure1b.png/.tiff to the working directory.
    """
    # parameter block
    years = 10
    m = 40      # equilibrium mean worm burden
    k = 0.24    # clumping parameter (also known as r in negative binomial)
    b = 2.075622396 * 10 ** -7
    a = 1.066666666666666667 * 10 ** -3  # cercarial shedding/contact/maturation rate per snail-week
    N = 10 ** 4     # number of total snails
    mu_1 = .004     # weekly death rate of worms per capita
    mu_2 = 0.25     # weekly death rate of infected snails per capita
    H = 1000        # number of humans
    rho = 0         # fraction of resistant snails [0-1]

    # begin simulation
    # NOTE: scipy.array/scipy.linspace were removed from SciPy's namespace;
    # use numpy directly.
    X0 = np.array([m, 0.015])  # initial mean worm burden and snail prevalence
    t = np.linspace(0, 52 * years, 52 * years)

    def dX_dt(X, t=0):
        """Two-state ODE right-hand side; reads rho from the enclosing scope."""
        # Mating probability phi for a negative-binomially distributed burden.
        alpha = X[0] / (X[0] + k)
        part1 = ((1 - alpha) ** (1 + k)) / (2 * np.pi)
        part2 = quad(integrand, 0, 2 * np.pi, args=(alpha, k))
        phi = 1 - part1 * part2[0]
        w = 0.5 * phi       # mated-female fraction
        beta = b * w
        return np.array([a * N * X[1] - mu_1 * X[0],
                         beta * H * X[0] * (1 - X[1] - rho) - mu_2 * X[1]])

    wormBurdenRed = []   # per capita worm burden reduction %
    matedPairsRed = []   # per capita number mated pair reduction %
    prevalenceRed = []   # prevalence reduction %
    reproNumberRed = []  # effective reproductive number reduction %
    for u in range(101):
        X, infodict = scipy.integrate.odeint(dX_dt, X0, t, full_output=True)
        wormBurdenRed.append(100 * (40 - X[-1, 0]) / 40)
        alpha_ = X[-1, 0] / (X[-1, 0] + k)
        part1_ = ((1 - alpha_) ** (1 + k)) / (2 * np.pi)
        part2_ = quad(integrand, 0, 2 * np.pi, args=(alpha_, k))
        phi_ = 1 - part1_ * part2_[0]
        w = 0.5 * phi_
        matedPairsRed.append((18.3421134484 - w * X[-1, 0]) / 18.3421134484 * 100)
        # Prevalence of infection under a negative-binomial burden.
        sigma = 1 - 2 * (1 + X[-1, 0] / (2 * k)) ** -k + (1 + X[-1, 0] / k) ** -k
        prevalenceRed.append(100 * (0.602598639541 - sigma) / 0.602598639541)
        rt = H * N * a * b * w * (1 - rho) / mu_1 / mu_2
        reproNumberRed.append(100 * (1.0152306002 - rt) / 1.0152306002)
        rho += .01
        # BUG FIX: the original reassigned X0 to a 3-element vector here,
        # which makes odeint fail on the next iteration (dX_dt returns only
        # 2 components). The state stays 2-dimensional; rho enters dX_dt
        # through the closure.

    rhoRange = np.linspace(0, 1, num=101, endpoint=True)
    print(len(rhoRange))
    fig = plt.figure(figsize=(18, 18))
    plt.axis((0, 1, 100, 0))
    plt.xlabel('Rho')
    plt.ylabel('Percent Reduction from Baseline')
    # wormBurden = plt.plot(rhoRange, wormBurdenRed, 'r:', label='Worm Burden', linewidth=4)
    matedPairs = plt.plot(rhoRange, matedPairsRed, 'm--', label='Infection Intensity', linewidth=4)
    prevalence = plt.plot(rhoRange, prevalenceRed, 'b-.', label='Prevalence', linewidth=4)
    reproNumber = plt.plot(rhoRange, reproNumberRed, 'k', label='Reproductive No.', linewidth=4)
    currentAxis = plt.gca()
    # plt.legend()
    fig.savefig("GeneDriveReviewFigure1b.png")
    fig.savefig("GeneDriveReviewFigure1b.tiff")


if __name__ == '__main__':
    main()
| 2.765625 | 3 |
Board/BitBoard.py | y701311/Othello | 2 | 12769255 | from Board.Board import Board
from Location.Location import Location
from Board.Disc import Disc
# Bit-board implementation of the Board class
class BitBoard(Board):
    """Othello board state held as two 64-bit bitboards (one per side).

    Bit 63 is square (row 1, column 1); bits descend left-to-right,
    top-to-bottom. ``playerBoard`` always belongs to the side to move;
    ``swapBoard``/``updateBoardStatus`` rotate the perspective.

    The original implementation repeated the same 7-line direction scan
    sixteen times; the scans are now factored into ``_shift`` and
    ``_scan_directions``.
    """

    # Edge masks that keep shifted scans from wrapping around the board.
    _H_MASK = 0x7e7e7e7e7e7e7e7e  # opponent board without the left/right files
    _V_MASK = 0x00ffffffffffff00  # opponent board without the top/bottom ranks
    _A_MASK = 0x007e7e7e7e7e7e00  # opponent board without any edge squares

    def __init__(self) -> None:
        # Colour of the side to move.
        self.player = Disc.black
        self.turn = 1
        # Standard Othello starting position.
        self.playerBoard = 0x0000000810000000
        self.opponentBoard = 0x0000001008000000

    @staticmethod
    def _shift(bits: int, offset: int) -> int:
        """Shift left for positive offsets, right for negative ones."""
        return bits << offset if offset >= 0 else bits >> -offset

    def _scan_directions(self):
        """Yield (masked opponent board, shift offset) for all 8 directions.

        Order: left, right, up, down, upper-left, upper-right,
        lower-right, lower-left.
        """
        h = self.opponentBoard & self._H_MASK
        v = self.opponentBoard & self._V_MASK
        a = self.opponentBoard & self._A_MASK
        yield h, 1
        yield h, -1
        yield v, 8
        yield v, -8
        yield a, 9
        yield a, 7
        yield a, -9
        yield a, -7

    def locationToBits(self, location: Location) -> int:
        """Return a board with only the bit for *location* set."""
        shift = 63 - (8 * (location.row - 1) + (location.column - 1))
        return 1 << shift

    def put(self, location: Location) -> None:
        """Place a disc at *location* (if legal) and flip the flanked discs."""
        if self.canPut(location):
            put = self.locationToBits(location)
            self.reverse(put)

    def passPut(self) -> None:
        """Pass the turn; the board itself does not change."""
        pass

    def canPut(self, location: Location) -> bool:
        """Return True when *location* is on the board and a legal move."""
        if location.checkRange():
            putBoard = self.locationToBits(location)
            legalBoard = self.makeLegalBoard()
            # Is the requested square among the legal moves?
            return (putBoard & legalBoard) == putBoard
        return False

    def makeLegalBoard(self) -> int:
        """Return a board with a bit set on every legal move for the mover."""
        legalBoard = 0
        # Board with a bit set on every empty square.
        blankBoard = ~(self.playerBoard | self.opponentBoard)
        for masked, offset in self._scan_directions():
            # Grow a chain of opponent discs adjacent to a player disc;
            # at most 6 discs can be flipped in one direction.
            line = masked & self._shift(self.playerBoard, offset)
            for _ in range(5):
                line |= masked & self._shift(line, offset)
            # An empty square just beyond such a chain is a legal move.
            legalBoard |= blankBoard & self._shift(line, offset)
        return legalBoard

    def getPlaceableLocation(self) -> list:
        """Return the legal moves as a list of Location objects."""
        placeableLocation = []
        legalBoard = self.makeLegalBoard()
        for row in range(1, 9):
            for column in range(1, 9):
                if (legalBoard & (1 << (63 - (8 * (row - 1) + (column - 1))))) != 0:
                    placeableLocation.append(Location(row, column))
        return placeableLocation

    def reverse(self, put: int) -> None:
        """Apply the move *put* (single-bit board): place and flip discs."""
        rev = self.getReverseBoard(put)
        self.playerBoard ^= (put | rev)
        self.opponentBoard ^= rev

    def getReverseBoard(self, put: int) -> int:
        """Return a board whose set bits are the discs flipped by *put*."""
        rev = 0
        for masked, offset in self._scan_directions():
            line = masked & self._shift(put, offset)
            for _ in range(5):
                line |= masked & self._shift(line, offset)
            # The chain only flips when it ends on one of our own discs.
            if (self.playerBoard & self._shift(line, offset)) != 0:
                rev |= line
        return rev

    def gameIsFinished(self) -> bool:
        """The game is over when neither side has a legal move."""
        playerLegalBoard = self.makeLegalBoard()
        self.swapBoard()
        opponentLegalBoard = self.makeLegalBoard()
        self.swapBoard()
        return (playerLegalBoard == 0) and (opponentLegalBoard == 0)

    def updateBoardStatus(self):
        """Advance to the next turn: swap perspective, colour and counter."""
        self.swapBoard()
        self.changePlayerColor()
        self.turn += 1

    def swapBoard(self) -> None:
        """Exchange the mover's and the opponent's bitboards."""
        self.playerBoard, self.opponentBoard = self.opponentBoard, self.playerBoard

    def changePlayerColor(self) -> None:
        """Toggle the colour of the side to move."""
        self.player = Disc.white if self.player == Disc.black else Disc.black

    def getWinner(self) -> Disc:
        """Return the colour with more discs, or Disc.empty on a draw."""
        blackDiscNum, whiteDiscNum = self.getDiscNum()
        if blackDiscNum > whiteDiscNum:
            return Disc.black
        if blackDiscNum < whiteDiscNum:
            return Disc.white
        return Disc.empty

    def getDiscNum(self) -> tuple:
        """Return (black disc count, white disc count)."""
        if self.player == Disc.black:
            return self.numOfDisc(self.playerBoard), self.numOfDisc(self.opponentBoard)
        return self.numOfDisc(self.opponentBoard), self.numOfDisc(self.playerBoard)

    def numOfDisc(self, board: int) -> int:
        """Population count of *board*.

        Parallel bit summation turns the count from O(N) into O(log N).
        """
        mask1bit = 0x5555555555555555
        mask2bit = 0x3333333333333333
        mask4bit = 0x0f0f0f0f0f0f0f0f
        mask8bit = 0x00ff00ff00ff00ff
        mask16bit = 0x0000ffff0000ffff
        mask32bit = 0x00000000ffffffff
        board = (board & mask1bit) + ((board >> 1) & mask1bit)
        board = (board & mask2bit) + ((board >> 2) & mask2bit)
        board = (board & mask4bit) + ((board >> 4) & mask4bit)
        board = (board & mask8bit) + ((board >> 8) & mask8bit)
        board = (board & mask16bit) + ((board >> 16) & mask16bit)
        return (board & mask32bit) + ((board >> 32) & mask32bit)

    def getLocationDisc(self, location: Location) -> Disc:
        """Return the disc colour occupying *location* (Disc.empty if none)."""
        mask = self.locationToBits(location)
        if (self.playerBoard & mask) != 0:
            return self.player
        if (self.opponentBoard & mask) != 0:
            return Disc.white if self.player == Disc.black else Disc.black
        return Disc.empty
| 3.1875 | 3 |
zad2/zad2.py | Rogue05/SPD | 0 | 12769256 | <filename>zad2/zad2.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 12:33:07 2019
@author: Wojtek
"""
from Process import *
from copy import deepcopy
def insertions(elem, base_list):
    """Yield every list obtained by inserting *elem* at one position of *base_list*."""
    for cut in range(len(base_list) + 1):
        yield base_list[:cut] + [elem] + base_list[cut:]
def removals(elem, base_list):
    """Yield every list obtained by removing one element of *base_list*.

    NOTE: *elem* is unused; the parameter is kept for signature symmetry
    with ``insertions``.
    """
    for cut in range(len(base_list)):
        yield base_list[:cut] + base_list[cut + 1:]
def add_proc_to_time(t, nthproc):
    """Append *nthproc* to a flow-shop schedule summarised by *t*.

    *t* holds, per machine, the time the machine becomes free; ``None``
    means an empty schedule. The list is mutated in place and returned.
    Side effect: ``nthproc.start`` is (re)set to the job's start time on
    each machine.
    """
    if t is None:
        t = [0] * len(nthproc.p)
    nthproc.start = []
    for machine, duration in enumerate(nthproc.p):
        if machine == 0:
            begin = t[0]
        else:
            # The job may start once both this machine and the previous
            # stage of the job are free.
            begin = max(t[machine], t[machine - 1])
        nthproc.start.append(begin)
        t[machine] = begin + duration
    return t
#def get_pipe_cost(N,return_pipe=False):
# t = [0]*len(N[0].p)
#
# for nthproc in N:
# t = add_proc_to_time(t,nthproc)
## nthproc.start = []
## for machine_nr in range(len(nthproc.p)):
## if machine_nr == 0:
## nthproc.start = [t[machine_nr],]
## t[machine_nr] = t[machine_nr]+nthproc.p[machine_nr]
## else:
## nthproc.start.append(max(t[machine_nr],t[machine_nr-1]))
## t[machine_nr] = max(t[machine_nr],t[machine_nr-1])+nthproc.p[machine_nr]
## if return_pipe:
## return N
# return max(t)
def pipe_to_str(pipe):
    """Render a job pipe as its space-separated job uids."""
    ids = [str(job.uid) for job in pipe]
    return ' '.join(ids)
def my_min(elem, new_order):
    """Return *new_order* with *elem* inserted at the position that gives
    the smallest flow-shop makespan (accelerated NEH insertion step).

    Uses an incrementally maintained machine-availability vector
    (``cached_t``) so each candidate position is evaluated without
    rescheduling the already-placed prefix.

    NOTE(review): add_proc_to_time overwrites ``elem.start`` on every
    evaluation; only the last evaluated schedule's start times survive.
    """
    # Baseline: elem inserted at position 0.
    t = add_proc_to_time(None, elem)
    cached_t = [0]*len(t)
    best_i = 0
    for i in range(len(new_order)):
        add_proc_to_time(t, new_order[i])
    best_t = max(t)
    # Try inserting elem after each existing job i (i.e. at position i+1).
    for i in range(len(new_order)):
        # cached_t now reflects the prefix new_order[0..i].
        add_proc_to_time(cached_t, new_order[i])
        tmp_t = deepcopy(cached_t)
        tmp_t = add_proc_to_time(tmp_t, elem)
        for j in range(i+1, len(new_order)):
            add_proc_to_time(tmp_t, new_order[j])
        max_tmp_t = max(tmp_t)
        if max_tmp_t < best_t:
            best_t = max_tmp_t
            best_i = i+1
    return [*new_order[0:best_i], elem, *new_order[best_i:]]
def my_remove_min(new_order):
    """Find the job whose removal yields the smallest makespan.

    Returns ``(best_order, elem)``: the order without that job, and the
    removed job itself (to be re-inserted by the caller).
    """
    min_cost = 100000000  # sentinel larger than any realistic makespan
    t = [0]*len(new_order[0].p)
    cached_t = deepcopy(t)
    best_order = []
    elem = None
    for i in range(len(new_order)):
        tmp_order = [*new_order[:i], *new_order[i+1:]]
        # tmp_t = deepcopy(cached_t)
        # for j in range(i+1,len(new_order)):
        #     add_proc_to_time(tmp_t,new_order[j])
        # cost = max(tmp_t)
        cost = get_pipe_cost(tmp_order)
        if cost < min_cost:
            min_cost = cost
            best_order = tmp_order
            elem = new_order[i]
        # Keep cached_t in sync with the prefix (used by the commented-out
        # incremental evaluation above).
        add_proc_to_time(cached_t, new_order[i])
    return best_order, elem
def NEH(N, boost=True):
    """NEH heuristic for permutation flow-shop scheduling.

    Jobs are taken in decreasing order of total processing time and each
    is inserted at its makespan-minimising position. With ``boost=True``
    the incremental my_min/my_remove_min variant is used, which also
    removes and re-inserts the cheapest-to-remove job after every
    insertion.
    """
    new_order = []
    # get_pipe_cost.clear_cache()
    for elem in sorted(N, key=lambda x: 1/sum(x.p)):
        if not boost:
            # Plain NEH: evaluate every insertion position from scratch.
            new_order = min(insertions(elem, new_order), key=lambda x: get_pipe_cost(x))
        else:
            new_order = my_min(elem, new_order)
            new_order, max_elem = my_remove_min(new_order)
            new_order = my_min(max_elem, new_order)
    return new_order
# Parse the benchmark file: each dataset starts with a line containing a
# single '.', is followed by a size line (ignored), the job matrix, and a
# 'neh:' section with the reference makespan.
with open('neh.data.txt', 'r') as file:
    lines = file.readlines()

class Dataset:
    # Simple record: uid, data (list of processPipe jobs), cost (reference makespan).
    pass

datasets = []; i = 0; pid = 0
readdata = False    # currently inside a job-matrix section
ignorenext = False  # skip the size line right after a dataset header
for line in lines:
    # if i < 10:
    #     print(len(line.split('.'))==2)
    if len(line.split('.')) == 2:
        # New dataset header.
        datasets.append(Dataset())
        datasets[-1].uid = i
        datasets[-1].data = []
        i = i+1
        pid = 1
        readdata = True
        ignorenext = True
    if len(line.split(':')) == 2 and line.split(':')[0] == 'neh':
        # The 'neh:' marker ends the job matrix; the reference cost follows.
        readdata = False
    if len(line.split(' ')) == 1 and not readdata and line != 'neh:\n' and line != '\n':
        datasets[-1].cost = int(line.split(' ')[0])
    if len(line.split(' ')) > 1 and readdata:
        if ignorenext:
            ignorenext = False
            continue
        # One row of machine times -> one job.
        margv = []
        for p in line.split(' '):
            margv.append(int(p))
        datasets[-1].data.append(processPipe(pid, *margv))
        pid = pid + 1
# Run the boosted NEH on (at most) the first 100 datasets, compare against
# the reference makespans, and dump results to improved_slow.txt.
i = 0; ok = 0
out = []
import time
for dataset in datasets:
    i = i + 1
    if i > 100:
        continue
    # print('call')
    out.append(Dataset())
    start = time.time()
    order = NEH(dataset.data, boost=True)
    out[-1].time = time.time() - start
    # c = get_pipe_cost.cache
    # order = []
    if get_pipe_cost(order) == dataset.cost:
        # Heuristic matched the reference makespan exactly.
        ok = ok+1
    out[-1].uid = i
    out[-1].mycost = get_pipe_cost(order)
    out[-1].datacost = dataset.cost
    out[-1].order = ' '.join([str(proc.uid) for proc in order])
    lista = [proc.uid for proc in order]
    print(i,
          out[-1].time,
          get_pipe_cost(order),
          dataset.cost,
          # Relative gap to the reference makespan.
          '{:.2f}%'.format((out[-1].mycost-out[-1].datacost)*100/out[-1].datacost),
          # Sanity check: no job appears twice in the schedule.
          'len test:', len(lista) == len(set(lista)),
          )
    # if i==10:
    #     break
print('ok', ok)
with open('improved_slow.txt', 'w') as file:
    for o in out:
        file.write(
            str(o.uid)+';'+
            str(o.time)+';'+
            str(o.mycost)+';'+
            str(o.datacost)+';'+
            o.order+'\n')
print('saved')
#end = time.time()
#print('time: ',end-start)
# noboost nocache 893.081104516983
# noboost cache 332.2397389411926
# boost 336.8780333995819
#79 [11, 85, 91, 102, 97, 98, 96, 18, 84, 34, 47, 32, 12, 82, 74, 99, 81, 100, 52, 101, 73, 53, 42, 55, 56, 103, 30, 36, 87, 54, 51, 49, 50, 88, 79, 58, 62, 33, 38, 27, 90, 77, 75, 72, 41, 44, 40, 13, 5, 66, 43, 37, 69, 22, 61, 59, 23, 20, 83, 19, 21, 8, 25, 60, 94, 0, 14, 9, 2, 15, 3, 28, 17, 4, 6, 93, 29, 35, 31, 26, 7, 10, 16, 45, 67, 39, 57, 89, 63, 70, 68, 86, 95, 71, 48, 64, 92, 46, 24, 1, 76, 80, 65, 78] | 2.609375 | 3 |
lib/airflow/airflow/contrib/jobs/scheduler_client.py | aqua7regia/ai-flow | 0 | 12769257 | <gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, Text, Optional
import queue
import time
from airflow.contrib.jobs.event_based_scheduler_job import SCHEDULER_NAMESPACE
from airflow.events.scheduler_events import RequestEvent, SchedulerInnerEventType, \
ResponseEvent, RunDagMessage, ExecuteTaskMessage, StopDagRunMessage
from airflow.executors.scheduling_action import SchedulingAction
from notification_service.base_notification import BaseEvent, EventWatcher
from notification_service.client import NotificationClient, ThreadEventWatcherHandle
class ExecutionContext(object):
    """Lightweight handle identifying one scheduled DAG run."""

    def __init__(self, dagrun_id):
        # Id of the dag run this context refers to.
        self.dagrun_id = dagrun_id
class ResponseWatcher(EventWatcher):
    """Watcher that captures the first event it sees and hands it to a waiter."""

    def __init__(self):
        # Capacity-1 queue: exactly one response is expected per request.
        self.queue: queue.Queue = queue.Queue(1)

    def process(self, events: List[BaseEvent]):
        """Store the first delivered event for the blocked consumer."""
        self.queue.put(events[0])

    def get_result(self, timeout: Optional[int] = None) -> object:
        """Block until an event arrives; raise TimeoutError after *timeout* seconds."""
        if timeout is None:
            return self.queue.get()
        try:
            return self.queue.get(timeout=timeout)
        except queue.Empty:
            raise TimeoutError('Get response timeout({})'.format(timeout))
class EventSchedulerClient(object):
    """Client for the event-based scheduler built on the notification service.

    Every public call registers a response watcher keyed on a unique
    request id, sends the request event, and blocks until the matching
    response arrives or the timeout expires. The shared
    register/send/wait/stop sequence (previously duplicated in four
    methods) lives in ``_request``.
    """

    def __init__(self, notification_server_uri=None, namespace=None, ns_client=None):
        """Use the given NotificationClient, or build one from uri/namespace."""
        if ns_client is None:
            self.ns_client = NotificationClient(notification_server_uri, namespace)
        else:
            self.ns_client = ns_client

    @staticmethod
    def generate_id(id):
        """Make a unique request id by appending a nanosecond timestamp."""
        return '{}_{}'.format(id, time.time_ns())

    def _request(self, request_id, body, timeout, timeout_message):
        """Send a RequestEvent and wait for the scheduler's ResponseEvent.

        The watcher is registered before sending so the response cannot be
        missed; the listener is always stopped. Raises TimeoutError with
        *timeout_message* (chained to the original error) on expiry.
        """
        watcher: ResponseWatcher = ResponseWatcher()
        handler: ThreadEventWatcherHandle \
            = self.ns_client.start_listen_event(key=request_id,
                                                event_type=SchedulerInnerEventType.RESPONSE.value,
                                                namespace=SCHEDULER_NAMESPACE, watcher=watcher)
        self.ns_client.send_event(RequestEvent(request_id=request_id, body=body).to_event())
        try:
            return ResponseEvent.from_base_event(watcher.get_result(timeout))
        except TimeoutError as e:
            raise TimeoutError(timeout_message) from e
        finally:
            handler.stop()

    def trigger_parse_dag(self, file_path, timeout: Optional[int] = 60) -> bool:
        """Ask the scheduler to (re)parse the DAG file at *file_path*.

        Uses the dedicated PARSE_DAG_* event types rather than the generic
        request/response pair; returns True once the scheduler replies.
        """
        id = self.generate_id('')
        watcher: ResponseWatcher = ResponseWatcher()
        handler: ThreadEventWatcherHandle \
            = self.ns_client.start_listen_event(key=id,
                                                event_type=SchedulerInnerEventType.PARSE_DAG_RESPONSE.value,
                                                namespace=SCHEDULER_NAMESPACE, watcher=watcher)
        self.ns_client.send_event(BaseEvent(key=id,
                                            event_type=SchedulerInnerEventType.PARSE_DAG_REQUEST.value,
                                            value=file_path))
        try:
            watcher.get_result(timeout=timeout)
        except TimeoutError as e:
            raise TimeoutError("Trigger the scheduler to parse the dag timeout({}).".format(timeout)) from e
        finally:
            handler.stop()
        return True

    def schedule_dag(self, dag_id, context: Text = None, timeout: Optional[int] = 30) -> ExecutionContext:
        """Start a new run of *dag_id*; returns its ExecutionContext."""
        id = self.generate_id(dag_id)
        result = self._request(
            id,
            RunDagMessage(dag_id, context).to_json(),
            timeout,
            "Trigger the scheduler to schedule the dag timeout({}).".format(timeout))
        return ExecutionContext(dagrun_id=result.body)

    def stop_dag_run(self, dag_id, context: ExecutionContext, timeout: Optional[int] = 30) -> ExecutionContext:
        """Stop the dag run identified by *context*."""
        id = self.generate_id(str(dag_id) + str(context.dagrun_id))
        result = self._request(
            id,
            StopDagRunMessage(dag_id=dag_id, dagrun_id=context.dagrun_id).to_json(),
            timeout,
            "Trigger the scheduler to stop the dag run timeout({}).".format(timeout))
        return ExecutionContext(dagrun_id=result.body)

    def schedule_task(self, dag_id: str, task_id: str,
                      action: SchedulingAction, context: ExecutionContext,
                      timeout: Optional[int] = 30) -> ExecutionContext:
        """Apply a SchedulingAction to one task of the run in *context*."""
        id = self.generate_id(context.dagrun_id)
        result = self._request(
            id,
            ExecuteTaskMessage(dag_id=dag_id,
                               task_id=task_id,
                               dagrun_id=context.dagrun_id,
                               action=action.value).to_json(),
            timeout,
            "Trigger the scheduler to schedule the task timeout({}).".format(timeout))
        return ExecutionContext(dagrun_id=result.body)
| 2.09375 | 2 |
investor_management/urls.py | BuildForSDG/Team-004-Backend | 2 | 12769258 | from django.urls import path
from rest_framework.routers import DefaultRouter
from investor_management import views
# NOTE(review): the router is instantiated but no viewsets are registered
# on it here; presumably kept for parity with the other apps — confirm.
router = DefaultRouter()

app_name = 'investor_management'

urlpatterns = [
    # Create a new investor user.
    path('user/create/', views.CreateInvestorUserView.as_view(), name='investor_user_create'),
    # Retrieve/update a specific investor user by id.
    path('user/manage/<int:id>', views.ManageInvestorUserView.as_view(), name='investor_user_manage')
]
| 1.8125 | 2 |
flaskApp.py | johnsliao/flask-bp | 0 | 12769259 | import os
import sqlite3
from flask import Flask, render_template
from contextlib import closing
app = Flask(__name__)
# Store the SQLite database file next to this module.
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'flaskApp.db'),
))
def connect_db():
    """Open a new connection to the configured SQLite database."""
    database_path = app.config['DATABASE']
    return sqlite3.connect(database_path)
def init_db():
    """(Re)create the database schema by executing schema.sql."""
    with app.open_resource('schema.sql', mode='r') as f:
        schema_sql = f.read()
    with closing(connect_db()) as db:
        db.cursor().executescript(schema_sql)
        db.commit()
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
if __name__ == '__main__':
    # Create the schema if needed, then start the development server.
    init_db()
    app.run()
blogapp/admin.py | melodyPereira05/ShopStop | 0 | 12769260 | <filename>blogapp/admin.py
from django.contrib import admin
# Register your models here.
from .models import BlogModel, CommentModel
# Expose blog posts and their comments in the Django admin site.
admin.site.register(BlogModel)
admin.site.register(CommentModel)
| 1.515625 | 2 |
nn_xor_relu_act.py | Hanumanth004/NN_XOR_implementation | 0 | 12769261 | <filename>nn_xor_relu_act.py
import numpy as np
from scipy.special import expit
# Network dimensions: 2 inputs -> 4 hidden units -> 1 output.
hidden_size=4
input_size=2
classes=1
# XOR truth-table inputs, one row per pattern.
X=np.array(([[0, 0],[0, 1],[1, 0],[1, 1]]))
# Randomly initialised parameters: hidden layer (Wh, bh) and output layer (Wo, bo).
Wh=np.random.randn(hidden_size,input_size)
bh=np.random.randn(hidden_size,1)
Wo=np.random.randn(1,hidden_size)
bo=np.random.randn(classes,1)
# XOR target outputs.
T=np.array(([[0],[1],[1],[0]]))
reg=1e-3  # NOTE(review): regularisation strength, never used below
def lossFunc():
    """One full-batch pass over the four XOR patterns.

    Forward: 2-4-1 network with leaky-ReLU activations
    (f(h) = max(0.1*h, h)) on both layers; squared-error loss.
    Backward: accumulates gradients for all parameters, then clips them
    to [-2, 2] in place.

    Reads the module-level parameters X, T, Wh, bh, Wo, bo.
    Returns (loss, dWh, dWo, dbh, dbo).
    """
    loss = 0
    dWh = np.zeros_like(Wh)
    dbh = np.zeros_like(bh)
    dWo = np.zeros_like(Wo)
    dbo = np.zeros_like(bo)
    for i in xrange(4):
        # forward propagation
        x = X[i, :][np.newaxis].T          # column-vector input
        h12 = np.dot(Wh, x) + bh           # hidden pre-activation
        hg = np.maximum(0.1 * h12, h12)    # leaky ReLU
        h3 = np.dot(Wo, hg) + bo           # output pre-activation
        hg3 = np.maximum(0.1 * h3, h3)     # leaky ReLU
        y = hg3
        loss += 0.5 * (T[i] - y) * (T[i] - y)
        # backward propagation
        de = -(T[i] - y)
        # BUG FIX: the leaky-ReLU derivative is 1 where the pre-activation
        # is positive and 0.1 elsewhere. The original used the activation
        # *value* for the hidden layer and (via an uninitialised zeros
        # array) a constant 0.1 for the output layer.
        dhg3 = np.where(h3 > 0, 1.0, 0.1)
        dy = dhg3 * de
        dbo += dy
        dWo += np.dot(dy, hg.T)
        dh12 = np.dot(Wo.T, dy)
        dh12tmp = np.where(h12 > 0, 1.0, 0.1) * dh12
        dbh += dh12tmp
        # (np.matrix is deprecated; a 1xN ndarray behaves the same here.)
        dWh += np.dot(dh12tmp, X[i, :][np.newaxis])
    # In-place gradient clipping to stabilise the updates.
    np.clip(dWh, -2, 2, dWh)
    np.clip(dWo, -2, 2, dWo)
    np.clip(dbh, -2, 2, dbh)
    np.clip(dbo, -2, 2, dbo)
    return loss, dWh, dWo, dbh, dbo
learning_rate=0.4
# Vanilla full-batch gradient descent over all 4 XOR patterns for 1000 epochs.
for ep in xrange(1000):
    loss,dWh,dWo,dbh,dbo=lossFunc()
    """
    for param,dparam in zip([Wh,Wo,bh,bo],[dWh,dWo,dbh,dbo]):
        param=param-learning_rate*dparam
    """
    """
    print dWh
    print dWo
    print dbh
    print dbo
    print 'before the update'
    print Wh
    print Wo
    print bh
    print bo
    """
    # In-place parameter updates.  (The commented-out zip() version above
    # would only rebind the loop variable, not the actual parameters.)
    Wh+=-learning_rate*dWh
    Wo+=-learning_rate*dWo
    bh+=-learning_rate*dbh
    bo+=-learning_rate*dbo
    """
    print 'after the update'
    print Wh
    print Wo
    print bh
    print bo
    """
    # Report progress every 100 epochs.
    if(ep%100==0):
        print 'Loss is %f' %(loss)
# Final evaluation: forward-propagate each XOR pattern and print the
# network's output (should approach 0, 1, 1, 0).
for i in xrange(4):
    #forward propogation
    h12 = np.dot(Wh, X[i,:][np.newaxis].T) + bh
    #hg=expit(h12)
    #hg=np.tanh(h12)
    #hg=np.maximum(0,h12)
    hg=np.maximum(0.1*h12,h12)
    #print hg
    h3=np.dot(Wo,hg) + bo
    #hg3=expit(h3)
    #hg3=np.tanh(h3)
    #hg3=np.maximum(0,h3)
    hg3=np.maximum(0.1*h3,h3)
    print hg3
| 2.546875 | 3 |
VisualizeApp/migrations/0001_initial.py | KnownAsNob/SOStimate | 0 | 12769262 | <gh_stars>0
# Generated by Django 3.0 on 2020-01-26 01:22
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Calls and Station tables.

    Both models keep their flexible payload in a PostgreSQL JSONB column
    (``details`` for Calls, ``info`` for Station).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Calls',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('details', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        migrations.CreateModel(
            name='Station',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('info', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
    ]
| 2 | 2 |
recognizer/pddl/propositional_planner.py | RukNdf/MA-Landmark | 0 | 12769263 | #!/usr/bin/env python
# Four spaces as indentation [no tabs]
#
# propositional_planner.py
# ma-goal-recognition
#
# Created by <NAME> on 2020-03-12.
# Copyright 2020 <NAME>. All rights reserved.
#
import time
from collections import deque

from recognizer.pddl.pddl_parser import PDDL_Parser
from recognizer.pddl.pddl_planner import PDDL_Planner
# from recognizer.pddl.domain import State
from recognizer.pddl.state import applicable, apply
class Propositional_Planner(PDDL_Planner):
    """Breadth-first forward-search planner over grounded (propositional)
    actions.

    Optional bounds: ``max_length`` caps the plan length and
    ``time_limit`` (seconds) caps the wall-clock search time; exceeding
    either bound makes :meth:`solve` return ``None``.
    """

    def __init__(self, max_length=0, time_limit=0, verbose=False):
        super().__init__(verbose)
        self.max_length = max_length
        self.time_limit = time_limit

    def tree_length(self, plan):
        """Return the number of actions in a linked plan of (action, tail) pairs."""
        length = 0
        while plan:
            length += 1
            act, plan = plan
        return length

    #-----------------------------------------------
    # Solve
    #-----------------------------------------------

    def solve(self, domain, initial_state, goal_state):
        """Breadth-first search from *initial_state* to the goal.

        domain: iterable of grounded actions.
        goal_state: (positive literals, negative literals) pair.
        Returns a list of actions, [] if the goal already holds, or None
        when no plan exists or a configured bound is exceeded.
        """
        if self.time_limit:
            start = time.time()
        # Parsed data
        actions = domain
        state = frozenset(initial_state)
        goal_pos = frozenset(goal_state[0])
        goal_not = frozenset(goal_state[1])
        # Do nothing: the goal already holds in the initial state.
        if applicable(state, goal_pos, goal_not):
            return []
        # Search.  Each fringe entry is (state, plan), where plan is a
        # reversed linked list of (action, tail) pairs.  A deque gives
        # O(1) FIFO pops; the original list.pop(0) was O(n) per pop,
        # making the whole search accidentally quadratic.
        visited = set([state])
        fringe = deque([(state, None)])
        while fringe:
            state, plan = fringe.popleft()
            if self.max_length and plan is not None and self.tree_length(plan) > self.max_length:
                return None
            if self.time_limit and time.time() - start > self.time_limit:
                return None
            for act in actions:
                if applicable(state, act.positive_preconditions, act.negative_preconditions):
                    new_state = apply(state, act.add_effects, act.del_effects)
                    if new_state not in visited:
                        if applicable(new_state, goal_pos, goal_not):
                            # Unwind the linked plan into an ordered list.
                            full_plan = [act]
                            while plan:
                                act, plan = plan
                                full_plan.insert(0, act)
                            return full_plan
                        visited.add(new_state)
                        fringe.append((new_state, (act, plan)))
        return None
def main(domain, problem):
    """Plan for the given domain/problem files and print the result."""
    solver = Propositional_Planner()
    result = solver.solve_file(domain, problem)
    if not result:
        print('No plan was found')
        return
    print('plan:')
    for step in result:
        print(step)
# ==========================================
# Main
# ==========================================
if __name__ == '__main__':
import sys
domain = sys.argv[1]
problem = sys.argv[2]
main(domain,problem) | 2.453125 | 2 |
pyxsim/utils.py | Joeybraspenning/pyxsim | 17 | 12769264 | <reponame>Joeybraspenning/pyxsim
from unyt import unyt_array, unyt_quantity
from astropy.units import Quantity
import logging
from more_itertools import always_iterable
import numpy as np
# Package-wide logger for pyxsim, configured once at import time.
pyxsimLogger = logging.getLogger("pyxsim")
# Log-record formats (plain and wider-level variants; only the first is
# actually used below).
ufstring = "%(name)-3s : [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s : [%(levelname)-18s] %(asctime)s %(message)s"
pyxsim_sh = logging.StreamHandler()
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
pyxsim_sh.setFormatter(formatter)
# add the handler to the logger
pyxsimLogger.addHandler(pyxsim_sh)
pyxsimLogger.setLevel('INFO')
# Do not forward records to the root logger (avoids duplicate output).
pyxsimLogger.propagate = False
# Short alias used throughout the package.
mylog = pyxsimLogger
def parse_value(value, default_units, ds=None):
    """Coerce *value* into a quantity expressed in *default_units*.

    Accepts an astropy Quantity, an unyt_quantity, a (value, units)
    tuple, or a bare number (taken to already be in *default_units*).
    When a dataset *ds* is given, its ``quan`` factory is used so the
    result carries the dataset's unit registry.
    """
    if isinstance(value, Quantity):
        value = unyt_quantity.from_astropy(value)
    quan = unyt_quantity if ds is None else ds.quan
    if isinstance(value, unyt_quantity):
        result = quan(value.v, value.units)
    elif isinstance(value, tuple):
        magnitude, units = value
        result = quan(magnitude, units)
    else:
        return quan(value, default_units)
    return result.in_units(default_units)
def isunitful(a):
    """Return True if *a* carries units.

    True for astropy Quantity / unyt_array instances, and for
    (value, unit) tuples that unyt can interpret; False otherwise.
    """
    if isinstance(a, (Quantity, unyt_array)):
        return True
    elif isinstance(a, tuple):
        try:
            unyt_array(a[0], a[1])
            return True
        # Catch Exception rather than a bare except so that
        # KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            pass
    return False
def ensure_list(obj):
return list(always_iterable(obj))
def validate_parameters(first, second, skip=None):
    """Check that two parameter mappings are identical.

    *first* and *second* map parameter names to indexable values (h5py
    datasets or 0-d numpy arrays, read via ``[()]``).  Keys listed in
    *skip* are excluded from the value comparison.  Raises RuntimeError
    on any mismatch in keys or values.
    """
    if skip is None:
        skip = []
    keys1 = sorted(first.keys())
    keys2 = sorted(second.keys())
    if keys1 != keys2:
        raise RuntimeError("The two inputs do not have the same parameters!")
    for k1, k2 in zip(keys1, keys2):
        if k1 in skip:
            continue
        v1 = first[k1][()]
        # BUG FIX: the original read ``first[k2]`` here, comparing the
        # first input against itself, so value mismatches were never
        # detected.
        v2 = second[k2][()]
        if isinstance(v1, (str, bytes)) or isinstance(v2, (str, bytes)):
            check_equal = v1 == v2
        else:
            check_equal = np.allclose(np.array(v1), np.array(v2), rtol=0.0, atol=1.0e-10)
        if not check_equal:
            raise RuntimeError(f"The values for the parameter '{k1}' in the two inputs"
                               f" are not identical ({v1} vs. {v2})!")
def merge_files(input_files, output_file, overwrite=False,
                add_exposure_times=False):
    r"""
    Helper function for merging PhotonList or EventList HDF5 files.

    Parameters
    ----------
    input_files : list of strings
        List of filenames that will be merged together.
    output_file : string
        Name of the merged file to be outputted.
    overwrite : boolean, default False
        If a the output file already exists, set this to True to
        overwrite it.
    add_exposure_times : boolean, default False
        If set to True, exposure times will be added together. Otherwise,
        the exposure times of all of the files must be the same.

    Examples
    --------
    >>> from pyxsim import merge_files
    >>> merge_files(["events_0.h5","events_1.h5","events_3.h5"], "events.h5",
    ...             overwrite=True, add_exposure_times=True)

    Notes
    -----
    Currently, to merge files it is mandated that all of the parameters have the
    same values, with the exception of the exposure time parameter "exp_time". If
    add_exposure_times=False, the maximum exposure time will be used.
    """
    from collections import defaultdict
    from pathlib import Path
    import h5py
    if Path(output_file).exists() and not overwrite:
        raise IOError(f"Cannot overwrite existing file {output_file}. "
                      "If you want to do this, set overwrite=True.")
    f_in = h5py.File(input_files[0], "r")
    f_out = h5py.File(output_file, "w")
    exp_time_key = ""
    # Copy every parameter except the exposure time from the first file;
    # remember the (possibly prefixed) exposure-time key for later.
    p_out = f_out.create_group("parameters")
    for key, param in f_in["parameters"].items():
        if key.endswith("exp_time"):
            exp_time_key = key
        else:
            p_out[key] = param[()]
    # When exposure times are summed they are allowed to differ between
    # inputs, so exclude that key from the consistency check.
    skip = [exp_time_key] if add_exposure_times else []
    for fn in input_files[1:]:
        f = h5py.File(fn, "r")
        validate_parameters(f_in["parameters"], f["parameters"], skip=skip)
        f.close()
    f_in.close()
    # Accumulate the data arrays from every input, and either sum or take
    # the maximum of the exposure times.
    data = defaultdict(list)
    tot_exp_time = 0.0
    for i, fn in enumerate(input_files):
        f = h5py.File(fn, "r")
        if add_exposure_times:
            tot_exp_time += f["/parameters"][exp_time_key][()]
        else:
            tot_exp_time = max(tot_exp_time, f["/parameters"][exp_time_key][()])
        for key in f["/data"]:
            data[key].append(f["/data"][key][:])
        f.close()
    p_out[exp_time_key] = tot_exp_time
    # Concatenate each dataset across all inputs into the output file.
    d = f_out.create_group("data")
    for k in data:
        d.create_dataset(k, data=np.concatenate(data[k]))
    f_out.close()
| 2.078125 | 2 |
utils/validation.py | Weenkus/on_power_efficient_virtual_network_function_placement_algorithm | 1 | 12769265 |
class Assert(object):
    """Small collection of assertion helpers."""

    @staticmethod
    def is_instance(instance, instances_class):
        """Assert that *instance* is an instance of *instances_class*.

        Note: ``assert`` statements are stripped when Python runs with
        ``-O``, so this is a development-time check only.
        """
        # Use the repr() of the instance rather than ``instance.__name__``:
        # ordinary objects have no __name__ attribute, so the original
        # raised AttributeError instead of AssertionError on failure.
        assert isinstance(instance, instances_class), '{0!r} should be an instance of class {1}'.format(
            instance, instances_class.__name__
        )
| 3.4375 | 3 |
pytorch_lightning/utilities/signature_utils.py | mathemusician/pytorch-lightning | 3,469 | 12769266 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, Optional
def is_param_in_hook_signature(
    hook_fx: Callable, param: str, explicit: bool = False, min_args: Optional[int] = None
) -> bool:
    """Check whether a hook's signature accepts a given parameter.

    Args:
        hook_fx: the hook callable to inspect
        param: the parameter name to look for
        explicit: if True, ``param`` must be declared by name (a bare
            ``*args`` does not count)
        min_args: if given, also accept hooks that declare at least this
            many positional parameters
    """
    spec = inspect.getfullargspec(hook_fx)
    named_params = spec.args[1:]  # drop `self`
    if param in named_params:
        return True
    if not explicit and spec.varargs is not None:
        return True
    return isinstance(min_args, int) and len(named_params) >= min_args
)
| 2.53125 | 3 |
storage/pynetwork/DB/Physics/prime.py | opensourceplanet/ICS | 0 | 12769267 | #Prime Number Checker
#<NAME>
'''
This program asks for a number between 0 and 5000
If the number isn't in that range it asks for it again
When a proper number is entered it checks if it is prime
If the number is prime it prints the two factors
If the number is not prime it prints all the factors
Finally it asks if you want to run the checker again or not
'''
# This function takes a number and prints its factors.
def print_factors(x):
    """Print every factor of *x*, one per line, then note it is not prime."""
    print("The factors of", x, "are:")
    divisors = (d for d in range(1, x + 1) if x % d == 0)
    for d in divisors:
        print(d)
    print(x, "is NOT a prime number")
# This function checks whether a user-supplied number is prime; non-primes
# are delegated to print_factors().
def prime_checker():
    """Prompt for an integer in [0, 5000] and report whether it is prime.

    Out-of-range values trigger an error message and a re-prompt (done
    with a loop rather than the original recursion, so repeated bad
    input cannot exhaust the stack).  Non-numeric input prints a
    warning and returns.
    """
    try:
        while True:
            a = int(input("Enter a number between 0 and 5000: "))
            # Enforce BOTH ends of the advertised range; the original
            # only rejected values above 5000 and silently accepted
            # negative numbers.
            if 0 <= a <= 5000:
                break
            print('\n\t****ERROR****')
            print('Enter a number between 0 and 5000\n')
        # 0 and 1 are not prime by definition; the divisor count below
        # would otherwise mislabel them as prime.
        if a < 2:
            print_factors(x=a)
            return
        # Count proper divisors other than 1; a prime has none.
        k = 0
        for i in range(2, a):
            if a % i == 0:
                k = k + 1
        if k == 0:
            print('The factors of your number are:')
            print('1')
            print(a, 'is a prime number')
        else:
            print_factors(x=a)
    except ValueError:
        # input() returned something that int() cannot parse.
        print('\n\t****Invalid*Input****\n\tPlease enter a number\n')
# Run the checker once at startup.  (The original wrapped this single call
# in a `while run1: ... break` loop that could only ever execute once,
# guarded by flag variables that were never changed.)
prime_checker()
# Then keep offering to run again until the user answers 'n'.
while True:
    option = input("\nContinue and run again? [y/n]: ").lower()
    if option == "y":
        prime_checker()
    elif option == "n":
        break
    else:
        print("\nNot a valid option. Please enter 'y' or 'n'")
| 4.21875 | 4 |
Laelia/apps/meds/admin.py | arantesdv/LaeliaAppProject | 0 | 12769268 | from django.contrib import admin
from .models import ActiveCompound, CompoundSet, ComercialDrug, Prescription
@admin.register(ActiveCompound)
class ActiveCompoundAdmin(admin.ModelAdmin):
search_fields = ['_search_names']
@admin.register(CompoundSet)
class CompoundSetAdmin(admin.ModelAdmin):
search_fields = ['active_compound__search_names']
@admin.register(ComercialDrug)
class ComercialDrugAdmin(admin.ModelAdmin):
search_fields = ['_search_names', '_name']
autocomplete_fields = ['compound_sets']
@admin.register(Prescription)
class PrescriptionAdmin(admin.ModelAdmin):
autocomplete_fields = ['comercial_drug']
| 1.601563 | 2 |
gc_win2.py | danz2004/learning_python | 0 | 12769269 | #!/usr/bin/env python3
# Write a program that computes the GC fraction of a DNA sequence in a window
# Window size is 11 nt
# Output with 4 significant figures using whichever method you prefer
# Use no nested loops. Instead, count only the first window
# Then 'move' the window by adding 1 letter on one side
# And subtracting 1 letter from the other side
# Describe the pros/cons of this algorithm vs. nested loops
# Target DNA sequence and window width.
seq = 'ACGACGCAGGAGGAGAGTTTCAGAGATCACGAATACATCCATATTACCCAGAGAGAG'
w = 11
# Build the first window and count its G/C bases directly.
window = ""
count = 0
for i in range(w):
    window += seq[i]
    if seq[i] == 'G' or seq[i] == 'C':
        count += 1
print(f'{0} {window} {count / w : .4f}')
# Slide the window one base at a time: subtract the outgoing base's
# contribution and add the incoming base's, so each step is O(1) instead
# of recounting the whole window.
for i in range(1, len(seq) - w + 1):
    if window[0] == 'G' or window[0] == 'C':
        count -= 1
    if seq[i + w - 1] == 'G' or seq[i + w - 1] == 'C':
        count += 1
    window = window[1:] + seq[i + w - 1]
    print(f'{i} {window} {count / w : .4f}')
| 4 | 4 |
mmda/utils/tools.py | allenai/mmda | 32 | 12769270 | from typing import List, Union, Dict, Any, Tuple
from mmda.types.span import Span
from mmda.types.box import Box
def merge_neighbor_spans(spans: List[Span], distance) -> List[Span]:
"""Merge neighboring spans in a list of un-overlapped spans:
when the gaps between neighboring spans is not larger than the
specified distance, they are considered as the neighbors.
Args:
spans (List[Span]): The input list of spans.
distance (int, optional):
The upper bound of interval gaps between two neighboring spans.
Defaults to 1.
Returns:
List[Span]: A list of merged spans
"""
is_neighboring_spans = (
lambda span1, span2: min(
abs(span1.start - span2.end), abs(span1.end - span2.start)
)
<= distance
)
# It assumes non-overlapped intervals within the list
merge_neighboring_spans = lambda span1, span2: Span(
min(span1.start, span2.start), max(span1.end, span2.end)
)
spans = sorted(spans, key=lambda ele: ele.start)
# When sorted, only one iteration round is needed.
if len(spans) == 0:
return []
if len(spans) == 1:
return spans
cur_merged_spans = [spans[0]]
for cur_span in spans[1:]:
prev_span = cur_merged_spans.pop()
if is_neighboring_spans(cur_span, prev_span):
cur_merged_spans.append(merge_neighboring_spans(prev_span, cur_span))
else:
# In this case, the prev_span should be moved to the bottom of the stack
cur_merged_spans.extend([prev_span, cur_span])
return cur_merged_spans
def find_overlapping_tokens_for_box(token_spans: List[Span], box: Box) -> List[Span]:
"""Retrun a list of spans where their boxes overlap with the input box."""
return [
token
for token in token_spans
if token.box is not None and token.box.is_overlap(box)
]
def allocate_overlapping_tokens_for_box(
token_spans: List[Span], box
) -> Tuple[List[Span], List[Span]]:
"""Different from `find_overlapping_tokens_for_box`, it will return a tuple
(allocate_tokens, remaining_tokens):
`allocated_tokens` is a list of spans where their boxes overlap with the input box,
`remaining_tokens` is a list of spans where they don't overlap with the input box.
"""
allocated_tokens, remaining_tokens = [], []
for token in token_spans:
if token.box is not None and token.box.is_overlap(box):
allocated_tokens.append(token)
else:
remaining_tokens.append(token)
return allocated_tokens, remaining_tokens | 3.140625 | 3 |
Day 18: Duet/Day 18: Duet.py | djvanhelmond/AdventofCode2017 | 0 | 12769271 | <reponame>djvanhelmond/AdventofCode2017
#!/usr/local/bin/python3
class Duet():
    """Interpreter for the Advent of Code 2017 day 18 (part 1) assembly.

    Executes instructions until either a ``rcv`` fires on a non-zero
    value or the program counter runs past the end of the program.
    The value of the last ``snd`` is kept in ``self.played``.
    """

    def __init__(self, instruction_list):
        self.instruction_list = [instruction.split() for instruction in instruction_list]
        self.program_counter = 0
        self.registers = {}
        self.played = None          # value of the last sound played
        self.nonZeroPlayed = False  # set once `rcv` sees a non-zero value
        self.__instr_set = {
            'set': self.__set,
            'add': self.__add,
            'mul': self.__mul,
            'mod': self.__mod,
            'snd': self.__snd,
            'rcv': self.__rcv,
            'jgz': self.__jgz,
        }

    def __value(self, operand):
        """Resolve an operand: a register name or an integer literal.

        Unset registers read as 0.  (The original resolved only second
        operands, so instructions such as ``jgz 1 3`` looked up a bogus
        register named "1" and never jumped.)
        """
        if operand.isalpha():
            return self.registers.get(operand, 0)
        return int(operand)

    def __set(self, x, y):
        self.registers[x] = self.__value(y)

    def __add(self, x, y):
        self.registers[x] += self.__value(y)

    def __mul(self, x, y):
        self.registers[x] *= self.__value(y)

    def __mod(self, x, y):
        self.registers[x] %= self.__value(y)

    def __snd(self, x):
        self.played = self.__value(x)

    def __rcv(self, x):
        if self.__value(x) != 0:
            self.nonZeroPlayed = True

    def __jgz(self, x, y):
        if self.__value(x) > 0:
            # -1 compensates for the unconditional +1 in __execute().
            self.program_counter += self.__value(y) - 1

    def __exitCriteria(self):
        # NOTE(review): a backward jump past the start would leave a
        # negative counter, which Python would index from the end of the
        # list — assumed not to occur in valid programs.
        if self.nonZeroPlayed:
            return True
        if self.program_counter >= len(self.instruction_list):
            return True
        return False

    def __execute(self):
        instruction = self.instruction_list[self.program_counter]
        self.program_counter += 1
        # Make sure a register used as a *target* exists.  Literal
        # operands are resolved by __value() and need no entry (the
        # original created bogus registers like "1" here).
        target = instruction[1]
        if target.isalpha() and target not in self.registers:
            self.registers[target] = 0
        self.__instr_set[instruction[0]](*instruction[1:])

    def run(self):
        while not self.__exitCriteria():
            self.__execute()
# Read the puzzle input (one instruction per line), run it, and report the
# value of the last sound played before the first successful recovery.
with open("./input.txt") as f:
    INPUT = f.readlines()

duet = Duet(INPUT)
duet.run()
print("Star 1: %s" % duet.played)
| 3.15625 | 3 |
Uebung3/Uebung3_Aufgabe8_2_3.py | B0mM3L6000/EiP | 1 | 12769272 | <reponame>B0mM3L6000/EiP
n = int(2367363789863971985761)
#überprüfen welche länge n = 2367363789863971985761 hat
#print(n)
i = 1
while n != 1: #solange n noch nicht gleich 1 ist
if n%2 == 0: #wenn n durch 2 ganz teilbar ist
n = n//2
#print(n)
else: #ansonsten
n = n*3+1
#print(n)
i = i+1
print("Die Länge der Folge ist:",i)
"""
Hier muss noch in die Kommentare wie man die Zahl n findet welches die möglichst
längste Folge hat für n < 10^6. Mit Worten erklärt.
"""
| 3.234375 | 3 |
NN_adj_new_75.py | hlibe/FinTech-of-Networks | 0 | 12769273 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 30 09:52:31 2021
@author: HaoLI
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 11:48:41 2021
@author: HaoLI
"""
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data.sampler import WeightedRandomSampler
import torch.utils.data as data_utils
import pandas as pd
import numpy as np
import os #for working directory
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score # 计算roc和auc
import time
import datetime
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import random
use_gpu = torch.cuda.is_available()
print("GPU",use_gpu)
list_rec = [] # records the hyper-parameters and AUCs of every run
randomseed = 22
random.seed(randomseed)
# Hyper-parameters (defaults; several are re-swept in the grid loops below).
layer1=196
layer2=196
oversample_ratio=0.5
training_epochs = 80
minibatch_size = 5000
learning_rate=2e-4
penalty=2 #p=1 for L1; p=0 for L2, weight_decay only for L2 ; p=2 for default. Exponent of the norm (default 2-norm): p=0 means L2 regularization, p=1 means L1
weight_decay=0.0125 #weight_decay is the L2 regularization strength
dropout=0.0
#os.getcwd()
os.chdir('/Users/HaoLI/Stata/credit/data')
df = pd.read_csv('data1210rename_use.csv')
col_names = list(df.columns.values[3:30])
col_names.remove('default_geq_1') # X must not contain the target y
col_names.remove('default_geq_2')
col_names.remove('default_geq_3')
base_col_names = col_names[0:13] # for baseline model: bank data + morning/noon/evening consumption data
df_fillna = df.fillna(0) # fill NA with 0. No consumption is counted as 0
X = df_fillna[col_names]
y = df_fillna.default_geq_1 # Target variable
X_base = df_fillna[base_col_names]
y_base = df_fillna.default_geq_1 # Target variable
layer0=len(X.columns) # number of neurons in the input layer
#min_max_scaler = MinMaxScaler()
#X = min_max_scaler.fit_transform(X)
sc = StandardScaler()# transform X into standard normal distribution for each column. X from dataframe to array
X = sc.fit_transform(X)
ros = RandomOverSampler(random_state=0)
# Grid search over architecture / regularization / training settings.
# (Each list currently holds a single value, so exactly one configuration runs.)
for layer1 in [196]:
    for layer2 in [196]:
        for weight_decay in [0.0125]:
            for training_epochs in [80]:
                for minibatch_size in [5000]:
                    for random_state in [18]:
                        # 70/30 train-test split, then oversample the minority
                        # class in the training set only.
                        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=random_state) # data types are dataframe
                        X_train, y_train = ros.fit_resample(X_train, y_train)
                        y_train = y_train.values
                        y_test = np.array(y_test)
                        # construct NN
                        class CreditNet(nn.Module):
                            """Feed-forward binary classifier: layer0 -> layer1 -> layer2 -> 1,
                            ReLU hidden activations with dropout, sigmoid output."""
                            def __init__(self): #p=1 for L1; p=0 for L2, weight_decay only for L2 ; p=2 for default. Exponent of the norm (default 2-norm): p=0 means L2 regularization, p=1 means L1
                                super().__init__()
                                self.fc1 = nn.Linear(layer0, layer1) # fc: fully connected
                                #self.bn1 = nn.BatchNorm1d(num_features=64, momentum=0.1) #default momentum = 0.1
                                self.fc2 = nn.Linear(layer1, layer2)
                                #self.fc3 = nn.Linear(layer2, layer3)
                                #self.bn3 = nn.BatchNorm1d(num_features=32)
                                #self.fc4 = nn.Linear(28, 24)
                                self.fc5 = nn.Linear(layer2, 1)
                            # x represents our data
                            def forward(self, x): # x is the data
                                x = F.relu(self.fc1(x)) # first x pass through
                                #x = self.bn1(x)
                                x = F.dropout(x, p=dropout)
                                x = F.relu(self.fc2(x))
                                x = F.dropout(x, p=dropout)
                                #x = F.relu(self.fc3(x))
                                #x = self.bn3(x)
                                #x = F.dropout(x, p=0.25)
                                #x = F.relu(self.fc4(x))
                                #x = F.softmax(self.fc5(x),dim=0)
                                x = torch.sigmoid(self.fc5(x))
                                return x
                        net = CreditNet().double() # .double() makes the data type float; in PyTorch only floating-point tensors can carry gradients
                        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
                        #or: device = torch.device("cuda:0")
                        device1 = torch.device("cuda:1")
                        if torch.cuda.is_available():
                            #net = net.cuda()
                            net = net.to(device1) #move the model to device1 (cuda:1)
                            #or: model.to(device1) #use GPU number 1
                        ########### Train #################
                        #loss_fn = nn.CrossEntropyLoss()
                        #loss_fn = nn.BCELoss() # binary cross entropy loss
                        #optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate) # auto adjust lr, better than sgd
                        #optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum = 0.9) # auto adjust lr, better than sgd; sgd stable
                        #Adam optimizer with weight_decay=0.0, i.e. no regularization
                        #Adam optimizer with weight_decay=10.0, i.e. regularization weight lambda = 10.0
                        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, weight_decay=weight_decay) # auto adjust lr, better than sgd
                        # if we use L2 regularization, apply the following line
                        #optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=weight_decay)
                        X_train = torch.from_numpy(X_train) # transfer to Tensor, no need to add .double(), because it is already float data type
                        y_train = torch.from_numpy(y_train).double() # .double() makes the data type float; in PyTorch only floating-point tensors can carry gradients
                        #weights_tensor = torch.from_numpy(overwt_arr_y_lossfn)
                        if torch.cuda.is_available():
                            X_train = X_train.to(device1)
                            y_train = y_train.to(device1)
                            #weights_tensor = weights_tensor.to(device1)
                        train = data_utils.TensorDataset(X_train, y_train) # adjust format. Bundle X and Y together for training
                        train_loader = data_utils.DataLoader(train, batch_size=minibatch_size, shuffle=True) # frequently used for training in PyTorch. batch_size sets how many rows are fed to the network at a time; shuffle controls whether the data are reshuffled each epoch (default False) — shuffling makes the samples more independent
                        # !tensorboard --logdir './runs' #in a remote notebook, using the magic command may prevent you from opening tensorboard's http service
                        from tensorboardX import SummaryWriter
                        writer = SummaryWriter()
                        #%reload_ext tensorboard
                        # Load the TensorBoard notebook extension
                        for epoch in range(training_epochs):
                            y_train_labels = [] # create an empty array
                            y_train_pred = []
                            for b, data in enumerate(train_loader, 0): # fetch one minibatch
                                inputs, labels = data#.cuda() # inputs and labels follows that when loaded
                                if torch.cuda.is_available():
                                    inputs = inputs.to(device1)
                                    labels = labels.to(device1)
                                    #weights = weights.to(device1)
                                #print("inputs shape", inputs.shape, labels.shape)
                                #print("inputs", inputs)
                                #print("labels", labels)
                                optimizer.zero_grad() #reset gradients, i.e. zero the gradient buffers
                                y_pred = net(inputs) # obtain the predicted values, a Tensor
                                y_pred = y_pred.view(y_pred.size()[0])
                                #print("y_pred", y_pred)
                                y_train_labels = np.append(y_train_labels, labels.cpu().numpy())
                                y_train_pred = np.append(y_train_pred,y_pred.detach().cpu().numpy())
                                loss_fn = nn.BCELoss() # binary cross entropy loss, with weights
                                if torch.cuda.is_available():
                                    loss_fn = loss_fn.to(device1)
                                loss = loss_fn(y_pred, labels) # 2 tensors in, 1 value out
                                loss.backward() # backward pass
                                optimizer.step() # update weights
                                if b % 100 == 0: # every 100 minibatches, log the loss
                                    #print('Epochs: {}, batch: {} loss: {}'.format(epoch, b, loss))
                                    writer.add_scalar('NN_oversample',loss, epoch)
                        writer.close()
                        #%tensorboard --logdir #point tensorboard at the log directory it should read
                        X_test = torch.from_numpy(X_test) # check the tested results
                        y_test = torch.from_numpy(y_test).double()
                        if torch.cuda.is_available():
                            X_test = X_test.to(device1)
                            y_test = y_test.to(device1)
                        test = data_utils.TensorDataset(X_test, y_test)
                        test_loader = data_utils.DataLoader(test, batch_size=minibatch_size, shuffle=True)
                        y_test_labels = []
                        y_test_pred = []
                        with torch.no_grad(): #context manager: code wrapped by this statement does not track gradients
                            for data in test_loader:
                                inputs, labels = data
                                #inputs = inputs.to(device1)
                                #labels = labels.to(device1)
                                #print("inputs", inputs)
                                #print("labels", labels)
                                outputs = net(inputs)
                                outputs = outputs.view(outputs.size()[0])
                                #print("outputs", outputs)
                                #print("predicted", predicted.numpy())
                                y_test_labels = np.append(y_test_labels,labels.cpu().numpy())
                                y_test_pred = np.append(y_test_pred,outputs.cpu().numpy())
                        #print("Y_test_labels", Y_test_labels)
                        #print("Y_test_pred", Y_test_pred)
                        #### plot ROC, compute AUC ###
                        # y_true is ground truth labels, y_score is predicted probabilities generated by sklearn classifier
                        test_fpr, test_tpr, te_thresholds = roc_curve(y_true = y_test_labels, y_score = y_test_pred)
                        #print("AUC TEST = ", auc(test_fpr, test_tpr))
                        train_fpr, train_tpr, tr_thresholds = roc_curve(y_true = y_train_labels, y_score = y_train_pred) # /w_ytrain, such that return the array to 0,1 array
                        #print("AUC TRAIN = ", auc(train_fpr, train_tpr))
                        #print('resample: {}, Epochs: {}, batch size: {}, '.format(oversample_ratio, training_epochs, minibatch_size))
                        #print(net)
                        plt.grid()
                        plt.plot(train_fpr, train_tpr, label=" AUC TRAIN ="+str(auc(train_fpr, train_tpr)))
                        plt.plot(test_fpr, test_tpr, label=" AUC TEST ="+str(auc(test_fpr, test_tpr)))
                        plt.plot([0,1],[0,1],'g--')
                        plt.legend()
                        plt.xlabel("True Positive Rate")
                        plt.ylabel("False Positive Rate")
                        t='''
                        training_epochs=%s, minibatch_size=%s, 
                        learning_rate=%s, penalty=L%s, weight_decay=%s, 
                        dropout=%s, 24=>%s=>%s=>1, myoversampling, random_state=%s,
                        randomseed=%s
                        '''%(training_epochs,minibatch_size,learning_rate, 
                        penalty, weight_decay, dropout, layer1, layer2, random_state,randomseed)
                        plt.title("AUC(Neural Network ROC curve)"+t)
                        plt.grid(color='black', linestyle='-', linewidth=0.5)
                        time1 = datetime.datetime.now()
                        #format the current time to use it as the output file name
                        time2 = time1.strftime('%Y-%m-%d-%H%M%S')
                        plt.savefig("/Users/HaoLI/Stata/credit/out/ROC figure/Figure_"+time2+".png", bbox_inches = 'tight')
                        plt.show()
                        list_rec.append([auc(train_fpr, train_tpr), auc(test_fpr, test_tpr), 
                                         training_epochs,minibatch_size,learning_rate, 
                                         penalty, weight_decay, dropout, layer1, layer2, 
                                         random_state, randomseed
                                         ])
list_rec_1 = list_rec
# Collect every run's AUCs and hyper-parameters into a table for export.
df = pd.DataFrame(list_rec, columns = ['IS_AUC','OOS_AUC','training_epochs',
                                       'minibatch_size','learning_rate',
                                       'penalty', 'weight_decay', 'dropout',
                                       'layer1', 'layer2', 'random_state','randomseed'])
df.to_csv('NN_adj.csv') | 2.140625 | 2 |
reo/migrations/0142_auto_20211206_2100.py | NREL/REopt_API | 7 | 12769274 | # Generated by Django 3.1.13 on 2021-12-06 21:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reo', '0141_auto_20211202_2315'),
]
operations = [
migrations.RemoveField(
model_name='sitemodel',
name='preprocessed_BAU_lifecycle_emissions_tCO2',
),
migrations.RemoveField(
model_name='sitemodel',
name='preprocessed_BAU_year_one_emissions_tCO2',
),
]
| 1.375 | 1 |
examples/splitter.py | naturalis/imgpheno | 15 | 12769275 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
import cv2
import numpy as np
import common
import imgpheno as ft
def main():
    """Command-line entry point: segment each input image and save the parts."""
    logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
    arg_parser = argparse.ArgumentParser(description='Test image segmentation and splitting')
    arg_parser.add_argument('files', metavar='FILE', nargs='+', help='Input images')
    arg_parser.add_argument('-o', '--output', metavar='PATH', default=".", help='Path for output files.')
    arg_parser.add_argument('-i', '--iters', metavar='N', type=int, default=5, help="The number of grabCut iterations. Default is 5.")
    arg_parser.add_argument('-m', '--margin', metavar='N', type=int, default=1, help="The margin of the foreground rectangle from the edges. Default is 1.")
    arg_parser.add_argument('--max-size', metavar='N', type=float, help="Scale the input image down if its perimeter exceeds N. Default is no scaling.")
    arg_parser.add_argument('--min-size-out', metavar='N', type=int, default=200, help="Set the minimum perimeter for output images. Smaller images are ignored. Default is 200.")
    args = arg_parser.parse_args()
    for image_path in args.files:
        split_image(image_path, args)
    sys.stderr.write("Output was saved to %s\n" % args.output)
    return 0
def split_image(path, args):
    """Segment the image at *path* and save each segment as its own file.

    Returns 0 on success and -1 when the image cannot be read.  Segments
    whose height+width is below ``args.min_size_out`` are skipped.
    """
    img = cv2.imread(path)
    # BUG FIX: cv2.imread() returns None on failure; comparing an ndarray
    # with ``== None`` is an elementwise comparison (which can raise
    # "truth value is ambiguous"), so test identity instead.
    if img is None or img.size == 0:
        sys.stderr.write("Failed to read %s. Skipping.\n" % path)
        return -1

    logging.info("Processing %s ..." % path)

    # Scale the image down if its perimeter exceeds the maximum (if set).
    img = common.scale_max_perimeter(img, args.max_size)

    logging.info("Segmenting...")

    # Perform segmentation.
    mask = common.grabcut(img, args.iters, None, args.margin)

    # Create a binary mask. Foreground is made white, background black.
    bin_mask = np.where((mask==cv2.GC_FGD) + (mask==cv2.GC_PR_FGD), 255, 0).astype('uint8')

    # Split the image into segments.
    segments = ft.split_by_mask(img, bin_mask)

    logging.info("Exporting segments...")
    for i, im in enumerate(segments):
        # Ignore segments that are too small to be interesting.
        if sum(im.shape[:2]) < args.min_size_out:
            continue
        name = os.path.basename(path)
        name = os.path.splitext(name)
        out_path = "%s_%d%s" % (name[0], i, name[1])
        out_path = os.path.join(args.output, out_path)
        logging.info("\t%s" % out_path)
        cv2.imwrite(out_path, im)
    return 0
# Script entry point.
if __name__ == "__main__":
    main()
| 2.6875 | 3 |
setup.py | Einstein-Floripa/reply-card-corrector | 2 | 12769276 | from setuptools import setup, find_packages
# Packaging metadata for the reply-card correction library.
setup(
    name='reply_card_corrector',
    version='0.1.0',
    packages=find_packages(),
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    install_requires=["opencv-python", "numpy"],
)
| 1.109375 | 1 |
typesense-exporter.py | Alasano/typesense-exporter | 0 | 12769277 | <reponame>Alasano/typesense-exporter<gh_stars>0
import os
import json
import bottle
import requests
# Exporter settings (overridable via environment variables).
exporter_address = os.environ.get('TS_EXPORTER_LISTEN_ADDRESS', '0.0.0.0')
exporter_port = os.environ.get('TS_EXPORTER_LISTEN_PORT', 9000)
exporter_prefix = os.environ.get('TS_EXPORTER_METRICS_PREFIX', 'typesense')
# Connection details of the Typesense server being scraped.
typesense_scheme = os.environ.get('TS_EXPORTER_TYPESENSE_SCHEME', 'http')
typesense_host = os.environ.get('TS_EXPORTER_TYPESENSE_HOST', 'localhost')
typesense_port = os.environ.get('TYPESENSE_API_PORT', 8108)
typesense_apikey = os.environ.get('TYPESENSE_API_KEY')

# Typesense API endpoints to scrape, keyed by the metric-group name.
typesense_endpoints = {
    "health": { "url": "health" },
    "metrics": { "url": "metrics.json" },
    "stats": { "url": "stats.json" }
}
## Fetch endpoints and return dict
def fetchEndpoints(ep):
    """Scrape every configured Typesense endpoint.

    Returns {endpoint-name: parsed JSON payload}.
    """
    headers = {'X-TYPESENSE-API-KEY': typesense_apikey}
    results = dict()
    for name, config in ep.items():
        url = typesense_scheme + "://" + typesense_host + ":" + str(typesense_port) + "/" + config["url"]
        response = requests.get(url, headers=headers, timeout=2)
        results[name] = json.loads(response.text)
    return results
## Format metrics according to openmetrics
## return metrics as list of strings
def generateOutput(sd, prefix=None):
    """Format scraped endpoint data as OpenMetrics-style text lines.

    sd: dict mapping endpoint name -> dict of metric name -> value, as
        produced by fetchEndpoints().
    prefix: metric-name prefix; defaults to the module-level exporter_prefix
        (parameter added for testability, backward compatible).
    Returns a list of strings, one metric sample per entry.
    """
    if prefix is None:
        prefix = exporter_prefix
    lines = list()
    for endpoint, data in sd.items():
        for name, value in data.items():
            # Express booleans numerically (True -> 1, False -> 0).
            if isinstance(value, bool):
                value = int(value)
            if isinstance(value, dict) and endpoint == "stats":
                # The stats endpoint nests per-request metrics keyed by
                # "<method> <path>"; expose those as labels.
                for label, val in value.items():
                    parts = label.split()
                    lines.append(f'{prefix}_{endpoint}_{name}{{method="{parts[0]}",path="{parts[1]}"}} {val}')
            else:
                # Default flat output format.
                lines.append(f"{prefix}_{endpoint}_{name} {value}")
    return lines
## Setup Webserver
# Index
@bottle.route('/')
def index():
    # Minimal HTML landing page linking to the /metrics endpoint.
    # NOTE(review): "<a/>" looks like a typo for "</a>"; browsers render it
    # anyway, so left unchanged here.
    return """
<h1>typesense-exporter</h1>
<ul>
<li><a href="/metrics">metrics<a/></li>
</ul>
"""
# Metrics
@bottle.route('/metrics')
def metrics():
    """Scrape all Typesense endpoints and render metrics as plain text.

    One metric sample per line, newline-terminated, suitable for a
    Prometheus-style scraper.
    """
    scrapedata = fetchEndpoints(typesense_endpoints)
    outputlist = generateOutput(scrapedata)
    # Single join instead of repeated += (which is quadratic in CPython-
    # unfriendly environments); behavior is identical, including the
    # trailing newline per line and empty output for an empty list.
    output = "".join(str(i) + "\n" for i in outputlist)
    bottle.response.content_type = 'text/plain'
    return output
# Run Webserver
# Blocking call: serves HTTP until the process is terminated.
bottle.run(host=exporter_address, port=exporter_port)
models/appointments/appointment.py | lohmann99/odensefotografen | 0 | 12769278 | import uuid
from static.common.database import Database
class Appointment(object):
    """A photo-session booking owned by a user, persisted in MongoDB.

    Dates are stored as "dd-mm-yyyy" strings; format_date() converts them
    to ISO-style "yyyy-mm-dd" for display.
    """

    def __init__(self, owner_id, date, time, confirmed=False, _id=None):
        self.owner_id = owner_id
        self.date = date
        self.time = time
        self.confirmed = confirmed
        # Generate a fresh hex id unless one was loaded from the database.
        if _id is None:
            _id = uuid.uuid4().hex
        self._id = _id

    def json(self):
        """Return a plain-dict representation suitable for Mongo insertion."""
        return {
            'owner_id': self.owner_id,
            'date': self.date,
            'time': self.time,
            'confirmed': self.confirmed,
            '_id': self._id
        }

    def save_to_db(self):
        """Insert this appointment into the 'appointments' collection."""
        Database.insert('appointments', self.json())

    def format_date(self):
        """Reverse the dash-separated date fields ("dd-mm-yyyy" -> "yyyy-mm-dd")."""
        pieces = self.date.split('-')
        return '-'.join(pieces[::-1])

    def as_text(self):
        """Return the Danish confirmation message for this appointment."""
        return 'Du har en forespurgt aftale med OdenseFotografen d. {} kl. {}'.format(self.format_date(), self.time)

    @classmethod
    def find_by_id(cls, _id):
        """Load an appointment by its _id from the database."""
        return cls(**Database.find_one('appointments', {'_id': _id}))
regex_redirects/migrations/0005_auto_20210425_1321.py | benkonrath/django-regex-redirects | 7 | 12769279 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares redirect.old_path as an
    # indexed CharField with max_length=512. Do not edit the field definition
    # by hand; Django compares it against the model state.

    dependencies = [
        ('regex_redirects', '0004_auto_20170512_1349'),
    ]

    operations = [
        migrations.AlterField(
            model_name='redirect',
            name='old_path',
            field=models.CharField(db_index=True, help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.", max_length=512, verbose_name='redirect from'),
        ),
    ]
| 1.6875 | 2 |
gridaurora/filterload.py | scivision/gridaurora | 0 | 12769280 | <filename>gridaurora/filterload.py
#!/usr/bin/env python
from pathlib import Path
import logging
import numpy as np
from scipy.interpolate import interp1d
import h5py
import xarray
# consider atmosphere
# lowtran is an optional dependency: if the import fails we log the problem
# and getSystemT() falls back to a unity (no-absorption) atmosphere.
try:
    import lowtran
except ImportError as e:
    logging.error(f"failure to load LOWTRAN, proceeding without atmospheric absorption model. {e}")
    lowtran = None
"""
gets optical System Transmittance from filter, sensor window, and QE spec.
<NAME> 2014
references:
BG3 filter datasheet: http://www.howardglass.com/pdf/bg3_datasheet.pdf
QE: http://www.andor.com/pdfs/specifications/Andor_iXon_Ultra_897_Specifications.pdf
http://occult.mit.edu/instrumentation/MORIS/Documents/DU-897_BI.pdf
window: http://www.andor.com/pdfs/specifications/Andor_Camera_Windows_Supplementary_Specifications.pdf
"""
def getSystemT(newLambda, bg3fn: Path, windfn: Path, qefn: Path, obsalt_km, zenang_deg, verbose: bool = False) -> xarray.Dataset:
    """Compute optical-system transmittance on the wavelength grid newLambda.

    Combines, element-wise over wavelength:
      * BG3 filter transmission from HDF5 file bg3fn (datasets /wavelength, /T)
      * camera window transmission from windfn (datasets /lamb, /T)
      * sensor quantum efficiency from qefn (datasets /lamb, /QE)
      * LOWTRAN7 atmospheric transmission for an observer at obsalt_km km
        looking at zenith angle zenang_deg (identity if lowtran is missing)

    Each curve is interpolated in log-space onto newLambda (presumably
    nanometers, to match LOWTRAN's wavelength_nm axis — TODO confirm units)
    and returned as an xarray.Dataset with variables filter/window/qe/atm
    plus the products sysNObg3 = window*qe*atm and sys = sysNObg3*filter.
    """
    bg3fn = Path(bg3fn).expanduser()
    windfn = Path(windfn).expanduser()
    qefn = Path(qefn).expanduser()

    newLambda = np.asarray(newLambda)
    # %% atmospheric absorption
    if lowtran is not None:
        c1 = {
            "model": 5,  # LOWTRAN atmosphere model selector
            "h1": obsalt_km,
            "angle": zenang_deg,
            "wlshort": newLambda[0],
            "wllong": newLambda[-1],
        }
        if verbose:
            print("loading LOWTRAN7 atmosphere model...")
        atmT = lowtran.transmittance(c1)["transmission"].squeeze()
        try:
            atmTcleaned = atmT.values.squeeze()
            # Replace exact zeros with the smallest positive float so the
            # subsequent log() is finite.
            atmTcleaned[atmTcleaned == 0] = np.spacing(1)  # to avoid log10(0)
            fwl = interp1d(atmT.wavelength_nm, np.log(atmTcleaned), axis=0)
        except AttributeError:  # problem with lowtran
            # Fall back to unity transmission (log of ones == zeros).
            fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind="linear")
    else:
        # lowtran unavailable: unity atmospheric transmission.
        fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind="linear")

    atmTinterp = np.exp(fwl(newLambda))
    if not np.isfinite(atmTinterp).all():
        logging.error("problem in computing LOWTRAN atmospheric attenuation, results are suspect!")
    # %% BG3 filter
    with h5py.File(bg3fn, "r") as f:
        try:
            assert isinstance(f["/T"], h5py.Dataset), "we only allow one transmission curve per file"  # simple legacy behavior
            # bounds_error=False: wavelengths outside the measured filter curve
            # interpolate to NaN rather than raising.
            fbg3 = interp1d(f["/wavelength"], np.log(f["/T"]), kind="linear", bounds_error=False)
        except KeyError:
            raise KeyError("could not find /wavelength in {}".format(f.filename))

        try:
            # Optional human-readable filter name stored as an HDF5 attribute.
            fname = f["T"].attrs["name"].item()
            if isinstance(fname, bytes):
                fname = fname.decode("utf8")
        except KeyError:
            fname = ""
    # %% camera window
    with h5py.File(windfn, "r") as f:
        fwind = interp1d(f["/lamb"], np.log(f["/T"]), kind="linear")
    # %% quantum efficiency
    with h5py.File(qefn, "r") as f:
        fqe = interp1d(f["/lamb"], np.log(f["/QE"]), kind="linear")
    # %% collect results into DataArray
    T = xarray.Dataset(
        {
            "filter": ("wavelength_nm", np.exp(fbg3(newLambda))),
            "window": ("wavelength_nm", np.exp(fwind(newLambda))),
            "qe": ("wavelength_nm", np.exp(fqe(newLambda))),
            "atm": ("wavelength_nm", atmTinterp),
        },
        coords={"wavelength_nm": newLambda},
        attrs={"filename": fname},
    )
    # Combined system response without and with the BG3 filter.
    T["sysNObg3"] = T["window"] * T["qe"] * T["atm"]
    T["sys"] = T["sysNObg3"] * T["filter"]

    return T
| 2.359375 | 2 |
spec_creation/autofill_eval_spec.py | hariv/e-mission-eval-public-data | 0 | 12769281 | import argparse
import logging
import json
import copy
import arrow
import requests
import osmapi
import re
import polyline as pl
import osrm as osrm
import shapely.geometry as geo
sensing_configs = json.load(open("sensing_regimes.all.specs.json"))
def validate_and_fill_datetime(current_spec):
    """Return a shallow copy of the spec with start_ts/end_ts epoch fields.

    Timestamps are derived from start_fmt_date/end_fmt_date interpreted in
    the spec region's timezone.
    """
    filled = copy.copy(current_spec)
    tz = current_spec["region"]["timezone"]
    for bound in ("start", "end"):
        filled[bound + "_ts"] = arrow.get(current_spec[bound + "_fmt_date"], tzinfo=tz).timestamp
    return filled
def node_to_geojson_coords(node_id):
    """Look up an OSM node and return its [lon, lat] (GeoJSON order)."""
    details = osmapi.OsmApi().NodeGet(node_id)
    return [details["lon"], details["lat"]]
def get_route_coords(mode, waypoint_coords):
    """Fetch a full route geometry through the given waypoints via OSRM.

    Raises NotImplementedError for modes OSRM has no profile for (trains).
    """
    if mode not in ("CAR", "WALKING", "BICYCLING", "BUS"):
        raise NotImplementedError("OSRM does not support train modes at this time")
    # Use OSRM with the full, non-simplified polyline geometry.
    request_params = {"overview": "full",
                      "geometries": "polyline", "steps": "false"}
    return osrm.get_route_points(mode, waypoint_coords, request_params)
def _fill_coords_from_id(loc):
if loc is None:
return None
if "osm_id" in loc["properties"]:
if loc["geometry"]["type"] == "Point":
loc["geometry"]["coordinates"] = node_to_geojson_coords(loc["properties"]["osm_id"])
elif loc["geometry"]["type"] == "Polygon":
# get coords for way returns a tuple of (nodes, points)
loc["geometry"]["coordinates"] = [[coords_swap(c) for c in get_coords_for_way(loc["properties"]["osm_id"])[1]]]
else:
assert "coordinates" in loc["geometry"],\
"Location %s does not have either an osmid or specified set of coordinates"
return loc
def validate_and_fill_calibration_tests(curr_spec):
    """Return a shallow copy of the spec with calibration tests filled in.

    Each test's start/end locations get coordinates resolved from OSM ids
    (mutated in place), and its config id is expanded to the full sensing
    config from the module-level sensing_configs table.
    """
    filled = copy.copy(curr_spec)
    for test in filled["calibration_tests"]:
        # Resolve coordinates in place; return values are not needed.
        _fill_coords_from_id(test["start_loc"])
        _fill_coords_from_id(test["end_loc"])
        test["config"] = sensing_configs[test["config"]["id"]]
    return filled
def coords_swap(lon_lat):
    """Return a new list with the two coordinate components swapped."""
    return lon_lat[::-1]
def get_route_from_osrm(t, start_coords, end_coords):
    """Resolve a leg's waypoints and ask OSRM for the route geometry.

    Waypoints come either from OSM node ids (route_waypoints, which are also
    recorded back onto the leg as GeoJSON) or from pre-specified
    waypoint_coords; with neither, the route is start->end directly.
    """
    if "route_waypoints" in t:
        # Map node ids to coordinates and record them on the leg.
        wp_coords = [node_to_geojson_coords(nid) for nid in t["route_waypoints"]]
        t["waypoint_coords"] = {
            "type": "Feature",
            "properties": {},
            "geometry": {
                "type": "Polygon",
                "coordinates": wp_coords
            }
        }
    elif "waypoint_coords" in t:
        wp_coords = t["waypoint_coords"]["geometry"]["coordinates"]
    else:
        wp_coords = []
    logging.debug("waypoint_coords = %s..." % wp_coords[0:3])
    return get_route_coords(t["mode"], [start_coords] + wp_coords + [end_coords])
def get_route_from_polyline(t):
    """Decode the leg's encoded polyline into a list of lat/lon pairs."""
    codec = pl.PolylineCodec()
    return codec.decode(t["polyline"])
# Porting the perl script at
# https://wiki.openstreetmap.org/wiki/Relations/Relations_to_GPX to python
def get_way_list(relation_details):
    """Return the ordered way ids of a relation, skipping platform members.

    Asserts that the relation contains no nested relations (only a child
    relation of ways/nodes is supported).
    """
    way_ids = []
    for member in relation_details["member"]:
        assert member["type"] != "relation", "This is a parent relation for child %d, expecting only child relations" % member["ref"]
        if member["type"] == "way" and member["role"] != "platform":
            way_ids.append(member["ref"])
    return way_ids
# way details is an array of n-1 node entries followed by a way entry
# the way entry has an "nd" field which is an array of node ids in the correct
# order the n-1 node entries are not necessarily in the correct order but
# provide the id -> lat,lng mapping
# Note also that the way can sometimes have the nodes in the reversed order
# e.g. way 367132251 in relation 9605483 is reversed compared to ways
# 368345083 and 27422567 before it
# this function automatically detects that and reverses the node array
def get_coords_for_way(wid, prev_last_node=-1):
    """Return (ordered_node_ids, [lat, lon] coords) for OSM way `wid`.

    WayFull returns n-1 node entries (id -> lat/lon, not necessarily in
    order) followed by one way entry whose "nd" array gives the correct
    node order. If the way is stored reversed relative to the previous way
    in a relation (its LAST node equals prev_last_node), the order is
    flipped so consecutive ways chain head-to-tail.
    """
    osm = osmapi.OsmApi()
    lat = {}
    lon = {}
    coords_list = []
    way_details = osm.WayFull(wid)
    # print("Processing way %d with %d nodes" % (wid, len(way_details) - 1))
    for e in way_details:
        if e["type"] == "node":
            lat[e["data"]["id"]] = e["data"]["lat"]
            lon[e["data"]["id"]] = e["data"]["lon"]
        if e["type"] == "way":
            # BUG FIX: the assertion message referenced undefined `wl[0]`,
            # which would raise NameError instead of the intended message.
            assert e["data"]["id"] == wid, "Way id mismatch! %d != %d" % (e["data"]["id"], wid)
            ordered_node_array = e["data"]["nd"]
            if prev_last_node != -1 and ordered_node_array[-1] == prev_last_node:
                print("LAST entry %d matches prev_last_node %d, REVERSING order for %d" %
                    (ordered_node_array[-1], prev_last_node, wid))
                ordered_node_array = list(reversed(ordered_node_array))
    for on in ordered_node_array:
        # Returning lat,lon instead of lon,lat to be consistent with
        # the returned values from OSRM. Since we manually swap the
        # values later
        coords_list.append([lat[on], lon[on]])
    return ordered_node_array, coords_list
def get_coords_for_relation(rid, start_node, end_node):
    """Return the [lat, lon] coordinate list for OSM relation `rid`,
    trimmed to the inclusive span between start_node and end_node.

    Ways are stitched head-to-tail: each way's first node is expected to
    follow the previous way's last node (get_coords_for_way reverses ways
    stored backwards).
    """
    osm = osmapi.OsmApi()
    relation_details = osm.RelationGet(rid)
    wl = get_way_list(relation_details)
    print("Relation %d mapped to %d ways" % (rid, len(wl)))
    coords_list = []
    on_list = []
    prev_last_node = -1
    for wid in wl:
        w_on_list, w_coords_list = get_coords_for_way(wid, prev_last_node)
        on_list.extend(w_on_list)
        coords_list.extend(w_coords_list)
        prev_last_node = w_on_list[-1]
        print("After adding %d entries from wid %d, curr count = %d" % (len(w_on_list), wid, len(coords_list)))
    start_index = on_list.index(start_node)
    end_index = on_list.index(end_node)
    # BUG FIX: the message fires when start comes AFTER end, but originally
    # read "is before end", describing the passing case instead.
    assert start_index <= end_index, "Start index %d is after end index %d" % (start_index, end_index)
    return coords_list[start_index:end_index+1]
def get_route_from_relation(t):
    """Ground-truth a transit leg's route from its OSM relation.

    Assumes relation.start_node and relation.end_node are both node ids.
    """
    rel = t["relation"]
    return get_coords_for_relation(rel["relation_id"], rel["start_node"], rel["end_node"])
def validate_and_fill_leg(orig_leg):
    """Return a copy of a trip leg with its route geometry filled in.

    The leg is marked type=TRAVEL, its start/end location polygons get
    coordinates resolved from OSM ids (mutated in place), and route_coords
    is computed from, in priority order: an encoded polyline, an OSM
    relation, or an OSRM routing call between representative points of the
    start/end polygons.
    """
    t = copy.copy(orig_leg)
    t["type"] = "TRAVEL"
    # These are now almost certain to be polygons and probably user-drawn,
    # not looked up from OSM, so what we get here is a GeoJSON polygon.
    # TODO: Drop support for single point
    start_polygon = _fill_coords_from_id(t["start_loc"])
    end_polygon = _fill_coords_from_id(t["end_loc"])
    print("Raw polygons: start = %s..., end = %s..." %
        (start_polygon["geometry"]["coordinates"][0][0:3],
         end_polygon["geometry"]["coordinates"][0][0:3]))
    # There are three possible ways in which users can specify routes:
    # - waypoints from OSM, mapped into coordinates and routed via OSRM
    # - a list of coordinates, routed via OSRM
    # - a relation with start and end nodes, used only for public transit trips
    # - a polyline, obtainable from external API calls such as OTP or Google Maps
    # Integrations are left unspecified because there is not much
    # standardization other than Google Maps. For example, the VTA trip
    # planner clearly uses OTP but its path (api/otp/plan?) differs from our
    # OTP integration (otp/routers/default/plan?). Once people figure out
    # the underlying call, they can copy-paste the geometry into the spec.
    if "polyline" in t:
        route_coords = get_route_from_polyline(t)
    elif "relation" in t:
        route_coords = get_route_from_relation(t)
    else:
        # We need to find a point within the polygon to pass to the routing engine
        start_coords_shp = geo.Polygon(start_polygon["geometry"]["coordinates"][0]).representative_point()
        start_coords = geo.mapping(start_coords_shp)["coordinates"]
        end_coords_shp = geo.Polygon(end_polygon["geometry"]["coordinates"][0]).representative_point()
        end_coords = geo.mapping(end_coords_shp)["coordinates"]
        print("Representative_coords: start = %s, end = %s" % (start_coords, end_coords))
        route_coords = get_route_from_osrm(t, start_coords, end_coords)
    # Routing helpers return lat/lon pairs; swap to GeoJSON lon/lat order.
    t["route_coords"] = {
        "type": "Feature",
        "properties": {},
        "geometry": {
            "type": "LineString",
            "coordinates": [coords_swap(rc) for rc in route_coords]
        }
    }
    return t
def get_hidden_access_transfer_walk_segments(prev_l, l):
    """Return the implicit "shim" segments between two consecutive legs.

    prev_l / l: leg dicts, either of which may be None to denote the trip
    boundary (prev_l is None -> `l` is the first leg; l is None -> `prev_l`
    is the last leg). These shims have no ground-truthed trajectory:
      * ACCESS walk to/from a parked vehicle at the trip boundaries
      * TRANSFER walk between two vehicular modes
      * WAITING stop before boarding a multiple-occupancy (transit) leg
    Returns a list of 0-2 shim segment dicts.
    """
    # print("prev_l = %s, l = %s" % (prev_l, l))
    if prev_l is None and l["mode"] != "WALKING":
        # First leg of the trip and it is vehicular: add an access leg for the
        # walk to where the vehicle is parked. This is unknown at spec
        # creation time, so there is no ground truth for it.
        return [{
            "id": "walk_start",
            "type": "ACCESS",
            "mode": "WALKING",
            "name": "Walk from the building to your vehicle",
            "loc": l["start_loc"],
        }]
    if l is None and prev_l["mode"] != "WALKING":
        # Last leg of the trip and it is vehicular: add the mirror-image
        # access leg from the parked vehicle back to the building.
        # (Fixed comment: this branch was mislabeled "first leg".)
        # NOTE(review): the id is "walk_start" here too — possibly a
        # copy-paste slip for "walk_end"; left unchanged since downstream
        # consumers may match on this id.
        return [{
            "id": "walk_start",
            "type": "ACCESS",
            "mode": "WALKING",
            "name": "Walk from your vehicle to the building",
            "loc": prev_l["end_loc"]
        }]
    # The order of the checks is important because we want the STOPPED to come
    # after the WALKING
    ret_list = []
    if prev_l is not None and l is not None and\
        prev_l["mode"] != "WALKING" and l["mode"] != "WALKING":
        # Transferring between vehicles: add a transit transfer without a
        # ground-truthed trajectory.
        # NOTE: unlike the first two cases, we are NOT returning here; the
        # next check also runs, because most transit transfers include both
        # a transfer walk and a stop.
        ret_list.append({
            "id": "tt_%s_%s" % (prev_l["mode"], l["mode"]),
            "type": "TRANSFER",
            "mode": "WALKING",
            "name": "Transfer between %s and %s at %s" %\
                (prev_l["mode"], l["mode"], prev_l["end_loc"]["properties"]["name"]),
            "loc": l["start_loc"]
        })
    # Idiom: dict.get avoids the `"key" in d and d["key"] == True` dance;
    # a missing key yields None, which compares unequal to True.
    if l is not None and l.get("multiple_occupancy") == True:
        ret_list.append({
            "id": "wait_for_%s" % (l["mode"]),
            "type": "WAITING",
            "mode": "STOPPED",
            "name": "Wait for %s at %s" %\
                (l["mode"], l["start_loc"]["properties"]["name"]),
            "loc": l["start_loc"]
        })
    # return from the last two checks
    return ret_list
def validate_and_fill_eval_trips(curr_spec):
    """Fill route geometry and shim legs for every evaluation trip.

    Multimodal trips (with a "legs" list) have each leg ground-truthed via
    validate_and_fill_leg() and get ACCESS/TRANSFER/WAITING shim segments
    inserted between legs. Unimodal trips are rewritten IN PLACE into the
    same multimodal shape (id/name/legs) so downstream code can always
    assume legs. Note curr_spec is shallow-copied, so the trip dicts
    themselves are mutated.
    """
    modified_spec = copy.copy(curr_spec)
    eval_trips = modified_spec["evaluation_trips"]
    for t in eval_trips:
        if "legs" in t:
            print("Filling multi-modal trip %s" % t["id"])
            prev_l = None
            ret_leg_list = []
            for i, l in enumerate(t["legs"]):
                print("Filling leg %s" % l["id"])
                # Add in shim legs like the ones to walk to/from your vehicle
                # or to transfer between transit modes
                shim_legs = get_hidden_access_transfer_walk_segments(prev_l, l)
                print("Got shim legs %s, extending" % ([sl["id"] for sl in shim_legs]))
                ret_leg_list.extend(shim_legs)
                ret_leg_list.append(validate_and_fill_leg(l))
                prev_l = l
            # Trailing shim: walk from the last vehicle, if any.
            shim_legs = get_hidden_access_transfer_walk_segments(prev_l, None)
            assert len(shim_legs) <= 1, "Last leg should not have a transfer shim"
            print("Got shim legs %s, extending" % ([sl["id"] for sl in shim_legs]))
            ret_leg_list.extend(shim_legs)
            t["legs"] = ret_leg_list
        else:
            print("Filling unimodal trip %s" % t["id"])
            # Unimodal trip: add shims if necessary. The filled spec is
            # always multimodal, since the only true unimodal trip is
            # walking and it is easier to assume that there are always legs,
            # especially given the leg types (ACCESS, TRANSFER, TRAVEL).
            unmod_trip = copy.deepcopy(t)
            # Rebuild the trip dict in place as {id, name, legs}.
            t.clear()
            t["id"] = unmod_trip["id"]
            t["name"] = unmod_trip["name"]
            t["legs"] = []
            before_shim_leg = get_hidden_access_transfer_walk_segments(None, unmod_trip)
            assert len(before_shim_leg) <= 1, "First leg should not have a transfer shim"
            print("Got shim legs %s, extending" % ([sl["id"] for sl in before_shim_leg]))
            t["legs"].extend(before_shim_leg)
            t["legs"].append(validate_and_fill_leg(unmod_trip))
            after_shim_leg = get_hidden_access_transfer_walk_segments(unmod_trip, None)
            assert len(after_shim_leg) <= 1, "Last leg should not have a transfer shim"
            print("Got shim legs %s, extending" % ([sl["id"] for sl in after_shim_leg]))
            t["legs"].extend(after_shim_leg)
    return modified_spec
def validate_and_fill_sensing_settings(curr_spec):
    """Expand each sensing-settings comparison into its full description.

    Each {phoneOS: [config_id, ...]} entry is replaced (in place) by
    {phoneOS: {"compare": ids, "name": "a v/s b", "sensing_configs": [...]}},
    with configs resolved through the module-level sensing_configs table.
    """
    filled = copy.copy(curr_spec)
    for setting in filled["sensing_settings"]:
        # Snapshot the items since we overwrite values while iterating.
        for phone_os, compare_list in list(setting.items()):
            setting[phone_os] = {
                "compare": compare_list,
                "name": " v/s ".join(compare_list),
                "sensing_configs": [sensing_configs[cr] for cr in compare_list],
            }
    return filled
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser(prog="autofill_eval_spec")
    parser.add_argument("in_spec_file", help="file to autofill")
    parser.add_argument("out_spec_file", help="autofilled version of in_spec_file")
    args = parser.parse_args()

    print("Reading input from %s" % args.in_spec_file)
    current_spec = json.load(open(args.in_spec_file))
    # Pipeline: timestamps -> calibration tests -> evaluation trips ->
    # sensing settings; each stage returns a progressively filled spec.
    dt_spec = validate_and_fill_datetime(current_spec)
    calib_spec = validate_and_fill_calibration_tests(dt_spec)
    eval_spec = validate_and_fill_eval_trips(calib_spec)
    settings_spec = validate_and_fill_sensing_settings(eval_spec)

    print("Writing output to %s" % args.out_spec_file)
    json.dump(settings_spec, open(args.out_spec_file, "w"), indent=2)
| 2.109375 | 2 |
mysite/SocialApp/utils.py | asmao7/Cmput404W2021 | 3 | 12769282 | <filename>mysite/SocialApp/utils.py<gh_stars>1-10
"""
Contains useful helper functions
"""
import requests, json
from requests.auth import HTTPBasicAuth
from .models import Author, Post, Comment, PostCategory, InboxItem, Followers, ForeignServer
def AuthorToJSON(author):
"""
Converts an Author object into a JSON-compatible dictionary.
Returns None on failure.
"""
if not author:
return None
try:
json_dict = {
"type":"author",
"id":author.url,
"host":author.host,
"displayName":author.username,
"url":author.url,
"github":author.github
}
return json_dict
except:
return None
def AuthorListToJSON(authors):
"""
Converts a list of Author objects into a JSON-compatible list
of Authors. Returns an empty list on failure.
"""
if not authors:
return []
try:
author_list = []
for author in authors:
test_json = AuthorToJSON(author)
if test_json:
author_list.append(test_json)
return author_list
except:
return []
# TODO: Fill out size and paginate
def PostToJSON(post):
"""
Converts a Post object into a JSON-compatible dictionary.
Return None on failure.
"""
if not post:
return None
try:
json_dict = {
"type":"post",
"title":post.title,
"id":post.url,
"source":post.url,
"origin":post.url,
"description":post.description,
"contentType":post.content_type,
"content":post.content,
"author":AuthorToJSON(post.author),
"categories":PostCategoryListToStringList(post.categories),
"count":Comment.objects.filter(post=post).count(),
"size":0,
"comments":CommentListToJSON(Comment.objects.filter(post=post)),
"published":str(post.published),
"visibility":post.visibility,
"unlisted":post.unlisted
}
return json_dict
except:
return None
def FollowerFinalJSON(follower_list):
"""
Converts Followe object into a JSON-compatible dictionary.
Returns an empty list on failure.
"""
if not follower_list:
json_dict = {
"type":"followers",
"items": []
}
return json
try:
json_dict = {
"type":"followers",
"items": follower_list
}
return json_dict
except:
json_dict = {
"type":"followers",
"items": []
}
return json_dict
def PostListToJSON(posts):
"""
Converts a list of Post objects into a JSON-compatible list
of Posts. Returns an empty list on failure.
"""
if not posts:
return []
try:
post_list = []
for post in posts:
test_json = PostToJSON(post)
if test_json:
post_list.append(test_json)
return post_list
except:
return []
def CommentToJSON(comment):
"""
Converts a Comment object into a JSON-compatible dictionary.
Return None on failure.
"""
if not comment:
return None
if not comment.author_json:
return None
# Used to fetch updated author representation to attach to a comment
# Was too slow in practice (heroku servers take a long time to wake up if they haven't had a request in a while), so our data can get stale now
try:
#basic_auth = GetURLBasicAuth(comment.author_url)
#response = None
#if (basic_auth):
#response = requests.get(comment.author_url, auth=basic_auth)
#else:
#response = requests.get(comment.author_url)
#author = None
#if response.ok:
#try:
#author = response.json()
#except:
#pass
author = json.loads(comment.author_json)
if (author):
json_dict = {
"type":"comment",
"author":author,
"comment":comment.comment,
"contentType":comment.content_type,
"published":str(comment.published),
"id":comment.url
}
return json_dict
else:
return None
except:
return None
def CommentListToJSON(comments):
"""
Converts a list of Comment objects into a JSON-compatible list
of Comments. Returns an empty list on failure.
"""
if not comments:
return []
try:
comment_list = []
for comment in comments:
test_json = CommentToJSON(comment)
if test_json:
comment_list.append(test_json)
return comment_list
except:
return []
def ObjectLikeToJSON(like):
"""
Converts a like on an object to JSON
"""
if not like:
return None
if not like.author_json:
return None
# Used to fetch updated author representation for likes
# this is slow, so we stopped doing it and rely on stale data for likes
try:
# basic_auth = GetURLBasicAuth(like.author_url)
# response = None
# if (basic_auth):
# response = requests.get(like.author_url, auth=basic_auth)
#else:
#response = requests.get(like.author_url)
#if response.ok:
#author = response.json()
author = json.loads(like.author_json)
if (author):
json_dict = {
"summary": "{} Likes your content".format(author["displayName"]),
"type": "Like",
"author": author,
"object": like.object_url
}
return json_dict
else:
return None
except:
return None
def ObjectLikeListToJSON(likes):
"""
Converts a list of ObjectLike objects into a JSON-compatible list
of likes. Returns an empty list on failure.
"""
if not likes:
return []
try:
likes_list = []
for like in likes:
test_json = ObjectLikeToJSON(like)
if test_json:
likes_list.append(test_json)
return likes_list
except:
return []
def PostCategoryListToStringList(categories):
"""
Converts a collection of Category objects into a JSON-compatible
list of strings. Return empty list on failure.
"""
if not categories:
return []
try:
category_list = []
for category in categories:
category_list.append(category.name)
return category_list
except:
return []
def StringListToPostCategoryList(category_list):
"""
Converts a list of strings into ORM categories. Will add
new categories to the database if they do not exist.
Return empty list on failure.
"""
if not category_list:
return []
try:
categories = []
for category in category_list:
try:
test_cat = PostCategory.objects.get(name=category)
categories.append(test_cat)
except:
test_cat = PostCategory.objects.create(name=category)
categories.append(test_cat)
return categories
except:
return []
def InboxItemToJSON(item):
"""
Converts an InboxItem object into a JSON-compatible dictionary.
Prefers to just use a json string. If `item` has something in its `link`
field, request the InboxItem's link and rely on APIs to return the
right JSONs. Recommended to just use `json_str`.
Returns a placeholder dictionary on failure.
item - an InboxItem object
"""
if not item:
return None
placeholder = {
"type":"",
"title":"Something went wrong.",
"id":"",
"source":"",
"origin":"",
"description":"There was a shared item here, but we couldn't retrieve it.",
"contentType":"text/plain",
"content":"",
"author":{},
"categories":"",
"count":0,
"size":0,
"comments":"",
"published":"",
"visibility":"PUBLIC",
"unlisted":True
}
if item.link != "" and item.json_str == "":
try:
r = requests.get(item.link)
d = r.json() # returns JSON, not Dict
return d
except Exception as e:
# Can't get the object from `link` eg. doesn't exist
print(e)
placeholder["id"] = item.link
placeholder["content"] = str(e)
return placeholder
else:
# Use json_str instead
try:
d = json.loads(item.json_str)
return d
except Exception as e:
print(e)
placeholder["content"] = str(e)
return placeholder
def FriendRequestToJson(requesting_author, requested_author):
"""
Converts a Friend Request object into a JSON-compatible dictionary.
Return None on failure.
"""
if not requesting_author:
return None
if not requested_author:
return None
try:
json_dict = {
"type":"Follow",
"summary": requesting_author['displayName'] + " wants to follow " + requested_author['displayName'],
"actor":requesting_author,
"object":requested_author,
}
return json_dict
except:
return None
def ValidateForeignPostJSON(post):
"""
Returns True if JSON conforms to the correct specs. Returns false otherwise.
"""
if "title" not in post:
return False
if "visibility" not in post:
return False
if "contentType" not in post:
return False
if "content" not in post:
return False
if "author" not in post:
return False
contentType = post["contentType"]
if(contentType != "text/plain" and contentType != "text/markdown" and
contentType != "application/base64" and contentType != "image/png;base64" and
contentType != "image/jpeg;base64"):
return False
for comment in post["comments"]:
commentContentType = comment["contentType"]
if (commentContentType != "text/plain" and commentContentType != "text/markdown"):
return False
return True
def GetURLBasicAuth(url):
"""
Gets basic auth credentials for this URL
"""
for server in ForeignServer.objects.all():
if server.host_name:
if server.host_name in url and server.username and server.password:
return HTTPBasicAuth(server.username, server.password) | 2.640625 | 3 |
miasmod_data_ui.py | DarkStarSword/miasmata-fixes | 10 | 12769283 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'miasmod_data.ui'
#
# Created: Tue Apr 29 18:40:05 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MiasmataData(object):
def setupUi(self, MiasmataData):
MiasmataData.setObjectName("MiasmataData")
MiasmataData.resize(713, 490)
self.verticalLayout_3 = QtGui.QVBoxLayout(MiasmataData)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.save = QtGui.QPushButton(MiasmataData)
self.save.setEnabled(False)
self.save.setObjectName("save")
self.horizontalLayout_2.addWidget(self.save)
self.show_diff = QtGui.QPushButton(MiasmataData)
self.show_diff.setEnabled(False)
self.show_diff.setObjectName("show_diff")
self.horizontalLayout_2.addWidget(self.show_diff)
self.lblVersion = QtGui.QLabel(MiasmataData)
self.lblVersion.setEnabled(False)
self.lblVersion.setObjectName("lblVersion")
self.horizontalLayout_2.addWidget(self.lblVersion)
self.version = QtGui.QLineEdit(MiasmataData)
self.version.setEnabled(False)
self.version.setMaximumSize(QtCore.QSize(84, 16777215))
self.version.setObjectName("version")
self.horizontalLayout_2.addWidget(self.version)
spacerItem = QtGui.QSpacerItem(20, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.treeView = QtGui.QTreeView(MiasmataData)
self.treeView.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.treeView.setAlternatingRowColors(True)
self.treeView.setRootIsDecorated(False)
self.treeView.setUniformRowHeights(True)
self.treeView.setAllColumnsShowFocus(True)
self.treeView.setObjectName("treeView")
self.verticalLayout.addWidget(self.treeView)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_4 = QtGui.QLabel(MiasmataData)
self.label_4.setObjectName("label_4")
self.horizontalLayout_3.addWidget(self.label_4)
self.search = QtGui.QLineEdit(MiasmataData)
self.search.setObjectName("search")
self.horizontalLayout_3.addWidget(self.search)
self.clear_search = QtGui.QPushButton(MiasmataData)
self.clear_search.setObjectName("clear_search")
self.horizontalLayout_3.addWidget(self.clear_search)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label = QtGui.QLabel(MiasmataData)
self.label.setObjectName("label")
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label)
self.name = QtGui.QLineEdit(MiasmataData)
self.name.setReadOnly(True)
self.name.setObjectName("name")
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.name)
self.type = QtGui.QComboBox(MiasmataData)
self.type.setEnabled(False)
self.type.setObjectName("type")
self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.type)
self.label_2 = QtGui.QLabel(MiasmataData)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_2)
self.label_3 = QtGui.QLabel(MiasmataData)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_3)
self.value_line = QtGui.QLineEdit(MiasmataData)
self.value_line.setObjectName("value_line")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.value_line)
self.verticalLayout_2.addLayout(self.formLayout)
spacerItem1 = QtGui.QSpacerItem(20, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.value_list = QtGui.QListView(MiasmataData)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.value_list.sizePolicy().hasHeightForWidth())
self.value_list.setSizePolicy(sizePolicy)
self.value_list.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.value_list.setAlternatingRowColors(True)
self.value_list.setUniformItemSizes(True)
self.value_list.setObjectName("value_list")
self.verticalLayout_2.addWidget(self.value_list)
self.value_hex = QtGui.QPlainTextEdit(MiasmataData)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.value_hex.sizePolicy().hasHeightForWidth())
self.value_hex.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Courier New")
font.setWeight(75)
font.setBold(True)
self.value_hex.setFont(font)
self.value_hex.setReadOnly(True)
self.value_hex.setObjectName("value_hex")
self.verticalLayout_2.addWidget(self.value_hex)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem2, 2, 1, 1, 1)
self.new_key = QtGui.QPushButton(MiasmataData)
self.new_key.setEnabled(False)
self.new_key.setObjectName("new_key")
self.gridLayout_2.addWidget(self.new_key, 1, 0, 1, 1)
self.delete_node = QtGui.QPushButton(MiasmataData)
self.delete_node.setEnabled(False)
self.delete_node.setObjectName("delete_node")
self.gridLayout_2.addWidget(self.delete_node, 2, 2, 1, 1)
self.new_value = QtGui.QPushButton(MiasmataData)
self.new_value.setEnabled(False)
self.new_value.setObjectName("new_value")
self.gridLayout_2.addWidget(self.new_value, 2, 0, 1, 1)
self.undo = QtGui.QPushButton(MiasmataData)
self.undo.setEnabled(False)
self.undo.setObjectName("undo")
self.gridLayout_2.addWidget(self.undo, 1, 2, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_2)
self.horizontalLayout.addLayout(self.verticalLayout_2)
self.horizontalLayout.setStretch(0, 3)
self.horizontalLayout.setStretch(1, 2)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.actionNew_Key = QtGui.QAction(MiasmataData)
self.actionNew_Key.setObjectName("actionNew_Key")
self.actionNew_Value = QtGui.QAction(MiasmataData)
self.actionNew_Value.setObjectName("actionNew_Value")
self.actionUndo_Changes = QtGui.QAction(MiasmataData)
self.actionUndo_Changes.setObjectName("actionUndo_Changes")
self.actionDelete = QtGui.QAction(MiasmataData)
self.actionDelete.setObjectName("actionDelete")
self.actionInsert_Row = QtGui.QAction(MiasmataData)
self.actionInsert_Row.setObjectName("actionInsert_Row")
self.actionRemove_Row = QtGui.QAction(MiasmataData)
self.actionRemove_Row.setObjectName("actionRemove_Row")
self.lblVersion.setBuddy(self.version)
self.label_4.setBuddy(self.search)
self.label.setBuddy(self.name)
self.label_2.setBuddy(self.type)
self.label_3.setBuddy(self.value_line)
self.retranslateUi(MiasmataData)
QtCore.QObject.connect(self.actionNew_Key, QtCore.SIGNAL("triggered()"), MiasmataData.insert_key)
QtCore.QObject.connect(self.actionNew_Value, QtCore.SIGNAL("triggered()"), MiasmataData.insert_value)
QtCore.QObject.connect(self.new_key, QtCore.SIGNAL("clicked()"), MiasmataData.insert_key)
QtCore.QObject.connect(self.new_value, QtCore.SIGNAL("clicked()"), MiasmataData.insert_value)
QtCore.QObject.connect(self.delete_node, QtCore.SIGNAL("clicked()"), MiasmataData.delete_node)
QtCore.QObject.connect(self.undo, QtCore.SIGNAL("clicked()"), MiasmataData.undo)
QtCore.QObject.connect(self.actionUndo_Changes, QtCore.SIGNAL("triggered()"), MiasmataData.undo)
QtCore.QObject.connect(self.actionDelete, QtCore.SIGNAL("triggered()"), MiasmataData.delete_node)
QtCore.QObject.connect(self.clear_search, QtCore.SIGNAL("clicked()"), self.search.clear)
QtCore.QMetaObject.connectSlotsByName(MiasmataData)
MiasmataData.setTabOrder(self.treeView, self.search)
MiasmataData.setTabOrder(self.search, self.clear_search)
MiasmataData.setTabOrder(self.clear_search, self.name)
MiasmataData.setTabOrder(self.name, self.type)
MiasmataData.setTabOrder(self.type, self.value_line)
MiasmataData.setTabOrder(self.value_line, self.value_list)
MiasmataData.setTabOrder(self.value_list, self.value_hex)
MiasmataData.setTabOrder(self.value_hex, self.new_key)
MiasmataData.setTabOrder(self.new_key, self.new_value)
MiasmataData.setTabOrder(self.new_value, self.undo)
MiasmataData.setTabOrder(self.undo, self.delete_node)
MiasmataData.setTabOrder(self.delete_node, self.save)
MiasmataData.setTabOrder(self.save, self.show_diff)
MiasmataData.setTabOrder(self.show_diff, self.version)
def retranslateUi(self, MiasmataData):
self.save.setText(QtGui.QApplication.translate("MiasmataData", "&Save...", None, QtGui.QApplication.UnicodeUTF8))
self.show_diff.setText(QtGui.QApplication.translate("MiasmataData", "Show &mod changes...", None, QtGui.QApplication.UnicodeUTF8))
self.lblVersion.setText(QtGui.QApplication.translate("MiasmataData", "&Version:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MiasmataData", "&Search:", None, QtGui.QApplication.UnicodeUTF8))
self.clear_search.setText(QtGui.QApplication.translate("MiasmataData", "&Clear", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MiasmataData", "&Name:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MiasmataData", "&Type:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MiasmataData", "&Value:", None, QtGui.QApplication.UnicodeUTF8))
self.new_key.setText(QtGui.QApplication.translate("MiasmataData", "New &Key", None, QtGui.QApplication.UnicodeUTF8))
self.delete_node.setText(QtGui.QApplication.translate("MiasmataData", "&Delete Node...", None, QtGui.QApplication.UnicodeUTF8))
self.new_value.setText(QtGui.QApplication.translate("MiasmataData", "New V&alue", None, QtGui.QApplication.UnicodeUTF8))
self.undo.setText(QtGui.QApplication.translate("MiasmataData", "&Undo Changes to Node", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew_Key.setText(QtGui.QApplication.translate("MiasmataData", "New Key", None, QtGui.QApplication.UnicodeUTF8))
self.actionNew_Value.setText(QtGui.QApplication.translate("MiasmataData", "New Value", None, QtGui.QApplication.UnicodeUTF8))
self.actionUndo_Changes.setText(QtGui.QApplication.translate("MiasmataData", "Undo Changes", None, QtGui.QApplication.UnicodeUTF8))
self.actionDelete.setText(QtGui.QApplication.translate("MiasmataData", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.actionInsert_Row.setText(QtGui.QApplication.translate("MiasmataData", "Insert Row", None, QtGui.QApplication.UnicodeUTF8))
self.actionRemove_Row.setText(QtGui.QApplication.translate("MiasmataData", "Remove Row", None, QtGui.QApplication.UnicodeUTF8))
| 1.6875 | 2 |
set_up_grasp_models/check_models/mass_balance_checks.py | martamatos/set_up_grasp_models | 0 | 12769284 | <gh_stars>0
def check_flux_balance(data_dict: dict) -> bool:
    """
    When all fluxes are specified in the measRates sheet, check if all metabolites are mass balanced (well, the ones
    that are marked as balanced in the mets sheet).

    For each metabolite, the weighted sum of measured fluxes
    (stoichiometric coefficient * flux mean) must be numerically zero.

    Args:
        data_dict: a dictionary that represents the excel file with the GRASP model
            (must contain the ``measRates``, ``mets`` and ``stoic`` DataFrames).

    Returns:
        True if a problem was found, False otherwise.
    """
    print('\nChecking if the fluxes for each metabolite production/consumptions add up to zero.\n')

    flag = False
    tolerance = 10**-8  # numerical tolerance for treating a flux balance as zero
    flux_df = data_dict['measRates']
    mets_df = data_dict['mets']
    stoic_df = data_dict['stoic']

    # All fluxes are specified iff measRates has one row per reaction.
    if len(stoic_df.index) == len(flux_df.index):
        # metabolite -> {reaction: stoichiometric coefficient}, non-zero entries only
        met_in_rxns = dict()
        for col in stoic_df.columns:
            rxn_list = stoic_df.loc[stoic_df[col].ne(0), col]
            met_in_rxns[col] = rxn_list.to_dict()

        balanced_mets = set(mets_df.loc[mets_df['balanced?'].eq(1), 'balanced?'].index.values)
        mean_col = flux_df.columns[0]

        for met in met_in_rxns.keys():
            flux_balance = sum(met_in_rxns[met][key] * flux_df.loc[key, mean_col]
                               for key in met_in_rxns[met].keys())

            if abs(flux_balance) > tolerance and met in balanced_mets:
                print(f'The flux for {met} is not balanced. The difference in flux is {flux_balance}')
                flag = True
            # Fix: use the same numerical tolerance as above instead of an
            # exact floating-point comparison to zero.
            elif abs(flux_balance) <= tolerance and met not in balanced_mets:
                print(f'{met} should be in balanced mets')
                flag = True

        if flag is False:
            print('Everything seems to be OK.')
    else:
        print('Not all fluxes are specified in measRates.\n')

    return flag
def check_balanced_metabolites(data_dict: dict) -> bool:
    """
    Checks if metabolites that are both consumed and produced in the stoichiometric matrix are marked as balanced and
    the other way around. Checking for mass balances is more accurate though.

    Note: rows of the ``mets`` sheet are assumed to be in the same order as the
    columns of the ``stoic`` sheet (positional lookup) -- TODO confirm this
    assumption against the sheet-building code.

    Args:
        data_dict: a dictionary that represents the excel file with the GRASP model.

    Returns:
        True if a mismatch was found, False otherwise.
    """
    print('\nChecking if metabolites are both consumed and produced in the stoichiometric matrix, and if',
          'so checks if they are marked as balanced in the mets sheet. However, the metabolite might be',
          'balanced/not balanced anyways depending on the flux of the reactions that consume/produce it,',
          'so take this with a grain of salt.\n')

    flag = False
    stoic_df = data_dict['stoic']
    mets_df = data_dict['mets']

    for i, met in enumerate(stoic_df.columns):
        # Metabolite looks balanced when it has both positive and negative
        # stoichiometric coefficients (produced and consumed somewhere).
        seems_balanced = stoic_df[met].gt(0).any() and stoic_df[met].lt(0).any()
        # Fix: use positional .iloc instead of chained integer indexing,
        # which is deprecated/fragile when the mets index is not integer.
        marked_balanced = mets_df['balanced?'].iloc[i]

        if seems_balanced and marked_balanced == 0:
            print(f'{met} is marked as not balanced but it seems to be balanced.')
            flag = True
        elif not seems_balanced and marked_balanced == 1:
            print(f'{met} is marked as balanced but it does not seem to be balanced.')
            flag = True

    if flag is False:
        print('Everything seems to be OK.')

    return flag
tests/test_features.py | ShayestehHS/jw_nx | 0 | 12769285 | import datetime
from datetime import timedelta
from tests.base import *
from jw_nx.tokens import AccessToken, RefreshToken
from jw_nx.settings import api_settings
class TestFeatures(BaseTest):
    """Feature-level tests for jw_nx settings (algorithm, leeway, expiration,
    last-login updates).

    Review fixes:
    * restored the redacted ``<PASSWORD>`` placeholders (invalid syntax) to
      ``self.password``, the pattern already used by ``test_return_expiration``;
    * ``test_access_expiration`` now saves/restores
      ``JW_NX_ACCESS_TOKEN_LIFETIME`` -- it previously restored the (never
      modified) refresh lifetime, leaking the 30-second access lifetime into
      subsequent tests.
    """

    def test_invalid_algorithm(self):
        """ Test setting invalid algorithm is raising error """
        ac, re = self.login()

        default_value = api_settings.JW_NX_ALGORITHM
        api_settings.JW_NX_ALGORITHM = 'invalid'
        response = self.with_token(ac).client.post(verify_url)
        api_settings.JW_NX_ALGORITHM = default_value  # restore for other tests

        self.assertIn('Unrecognized algorithm type', str(response.data['detail']))
        self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)

    def test_verify_with_expired_access_token_and_leeway(self):
        """ Test that expired token is valid in leeway time """
        ac, re = self.login()

        # Back-date the token's expiry by one day, then allow a leeway that
        # covers it.
        access = AccessToken()
        access.payload = access.decode(ac)
        access.payload['exp'] -= 24 * 60 * 60  # one day ago
        api_settings.JW_NX_LEEWAY = timedelta(days=1, seconds=10)
        with self.assertNumQueries(1):
            response = self.with_token(str(access)).client.post(verify_url)
        api_settings.JW_NX_LEEWAY = 0  # reset leeway

        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    def test_return_expiration(self):
        """ Test that setting JW_NX_RETURN_EXPIRATION = True, login endpoint is return expiration """
        user = self.create_test_user()
        payload = {'username': user.username, 'password': self.password}

        api_settings.JW_NX_RETURN_EXPIRATION = True
        response = self.client.post(login_url, data=payload, format='json')
        api_settings.JW_NX_RETURN_EXPIRATION = False
        check_response = self.client.post(login_url, data=payload, format='json')

        # With the setting on, expirations are included alongside the tokens.
        self.assertIn('access_token_expiration', response.data)
        self.assertIn('refresh_token_expiration', response.data)
        self.assertIn('access_token', response.data)
        self.assertIn('refresh_token', response.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # With the setting off, only the tokens are returned.
        self.assertNotIn('access_token_expiration', check_response.data)
        self.assertNotIn('refresh_token_expiration', check_response.data)
        self.assertIn('access_token', check_response.data)
        self.assertIn('refresh_token', check_response.data)
        self.assertEqual(check_response.status_code, status.HTTP_200_OK)

    def test_last_login(self):
        """ Test that setting JW_NX_UPDATE_LAST_LOGIN = True, after logging user in, last_update of user is update """
        user = self.create_test_user()
        # NOTE(review): naive datetime.now() fallback -- may mismatch an
        # aware last_login depending on USE_TZ; confirm project settings.
        before_login_time = user.last_login or datetime.datetime.now()
        payload = {'username': user.username, 'password': self.password}

        api_settings.JW_NX_UPDATE_LAST_LOGIN = True
        self.client.post(login_url, data=payload, format='json')
        api_settings.JW_NX_UPDATE_LAST_LOGIN = False
        user.refresh_from_db()

        self.assertNotEqual(before_login_time, user.last_login)
        self.assertGreater(user.last_login, before_login_time)

    def test_refresh_expiration(self):
        """ Test that refresh_token expiration time, is equal to `JW_NX_REFRESH_TOKEN_LIFETIME` """
        user = self.create_test_user()
        payload = {'username': user.username, 'password': self.password}

        default_value = api_settings.JW_NX_REFRESH_TOKEN_LIFETIME
        api_settings.JW_NX_REFRESH_TOKEN_LIFETIME = timedelta(seconds=100)
        response = self.client.post(login_url, data=payload, format='json')
        api_settings.JW_NX_REFRESH_TOKEN_LIFETIME = default_value

        refresh = RefreshToken()
        payload = refresh.decode(response.data['refresh_token'])
        expiration_second = payload['exp'] - payload['iat']
        self.assertEqual(expiration_second, 100)

    def test_access_expiration(self):
        """ Test that access_token expiration time, is equal to `JW_NX_ACCESS_TOKEN_LIFETIME` """
        user = self.create_test_user()
        payload = {'username': user.username, 'password': self.password}

        # Fix: save/restore the setting that is actually modified.
        default_value = api_settings.JW_NX_ACCESS_TOKEN_LIFETIME
        api_settings.JW_NX_ACCESS_TOKEN_LIFETIME = timedelta(minutes=0.5)
        response = self.client.post(login_url, data=payload, format='json')
        api_settings.JW_NX_ACCESS_TOKEN_LIFETIME = default_value

        access = AccessToken()
        refresh_payload = access.decode(response.data['refresh_token'])
        access_payload = access.decode(response.data['access_token'])
        expiration_second = access_payload['exp'] - refresh_payload['iat']
        self.assertEqual(expiration_second, timedelta(minutes=0.5).total_seconds())
| 2.59375 | 3 |
urlUpdated.py | GrissomE/urlUpdated | 0 | 12769286 | <gh_stars>0
import time, os, hashlib, json, re, requests
from bs4 import BeautifulSoup
from twilio.rest import Client
from dotenv import load_dotenv
def log(text):
    """Print *text* prefixed with a 'YYYY MM DD - HH:MM:SS' timestamp."""
    timestamp = time.strftime("%Y %m %d - %H:%M:%S")
    print(f'{timestamp}: {text}')
def error(text, exception):
    """Log *text* together with the exception's repr.

    Fix: the original interpolated ``exception.with_traceback`` -- the bound
    method object itself -- which printed a useless
    ``<built-in method with_traceback ...>`` instead of any error detail.
    """
    log(f'{text} while {exception!r}')
def load_json(filename):
    """Load JSON from *filename*, resolved relative to this script's directory.

    Returns the parsed object, or None (implicitly) when reading or parsing
    fails; the failure is logged via error().
    """
    try:
        file_abs_path = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(file_abs_path, filename), encoding='utf-8') as file:
            return json.load(file)
    except Exception as e:
        # Fix: report the actual filename instead of the literal "(unknown)".
        error(f'Failed to load file {filename}', e)
def write_json(filename, contents):
    """Write *contents* as JSON to *filename* next to this script.

    Failures are logged via error() rather than raised.
    """
    try:
        file_abs_path = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(file_abs_path, filename), 'w') as file:
            json.dump(contents, file)
    except Exception as e:
        # Fix: report the actual filename instead of the literal "(unknown)".
        error(f'Failed to write file {filename}', e)
def send_sms(text):
    """Send *text* as an SMS through Twilio.

    Credentials and phone numbers are read from the environment (SID, Key,
    From, To) after loading the .env file. Failures are logged, not raised.
    """
    load_dotenv()
    sid = os.environ["SID"]
    token = os.environ["Key"]
    sender = os.environ["From"]
    recipient = os.environ["To"]
    try:
        client = Client(sid, token)
        message = client.messages.create(
            to=recipient,
            from_=sender,
            body=text)
        log(f'Notified {message.sid}')
    except Exception as e:
        error('Failed sending SMS', e)
def parse_page(page):
    """Extract the visible text from an HTML document string."""
    return BeautifulSoup(page, 'html.parser').getText()
def get_page(url):
    """Fetch *url* (with a Chrome user agent) and return its visible text as UTF-8 bytes."""
    # Chrome User Agent
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36'}
    response = requests.get(url, headers=headers)
    visible_text = parse_page(response.text)
    return visible_text.encode('utf-8')
def init_url(url, url_hash, hashes):
    """First-time registration of *url*: hash its current content and store it."""
    page_hash = hashlib.sha256(get_page(url)).hexdigest()
    return update_hash(url_hash, page_hash, hashes)
def update_hash(url_hash, page_hash, hashes):
    """Record *page_hash* under *url_hash* in *hashes*; always reports a change (True)."""
    hashes[url_hash] = page_hash
    return True
def check_url(url, url_hash, hashes):
    """Re-fetch *url*, notify via SMS when its content hash changed, and
    record the new hash.

    Returns True when the page changed, False when it did not or when the
    check failed. Fix: the original returned None from the except branch,
    which made the caller's ``updated_hash |= check_url(...)`` raise
    ``TypeError`` (bool | None) on any fetch error.
    """
    try:
        response = get_page(url)
        page_hash = hashlib.sha256(response).hexdigest()
        if page_hash != hashes[url_hash]:
            domain_name = (re.search(r'://([A-Za-z_0-9.-]+).*', url))
            message = f'{domain_name.group(1)} changed: \n\n{url}'
            send_sms(message)
            return update_hash(url_hash, page_hash, hashes)
        else:
            return False
    except Exception as e:
        error(f'Failed checking URL: {url}', e)
        return False
if __name__ == "__main__":
hashes = load_json('hashes.json')
urls = load_json('urls.json')
updated_hash = False
for url in urls:
url_hash = hashlib.sha256(url.encode('utf-8')).hexdigest()
if url_hash in hashes:
updated_hash |= check_url(url, url_hash, hashes)
else:
updated_hash |= init_url(url, url_hash, hashes)
if updated_hash:
write_json('hashes.json', hashes) | 2.375 | 2 |
module03/solution.py | GNKirov/kiwitcmsgnkirov | 1 | 12769287 | def sum_of_digits(number):
"""
Calculating sum of digits of number
"""
sum = 0
number = abs(number)
while number > 0:
sum += number % 10
number = number // 10
return sum
def to_digits(number):
    """Return the decimal digits of *number*, most significant first.

    Matches the original loop: non-positive input yields an empty list.
    """
    if number <= 0:
        return []
    return [int(char) for char in str(number)]
def to_number(digits):
    """Return the integer whose decimal digits are *digits* (most significant
    first); an empty list yields 0.

    Fix: the original reversed *digits* in place, silently mutating the
    caller's list; this version leaves the input untouched.
    """
    number = 0
    for digit in digits:
        number = number * 10 + digit
    return number
def count_vowels(string):
    """Count occurrences of the vowels a, e, i, o, u, y (case-insensitive)."""
    return calculate_char_occurrences_in_str("aeiouy", string)
def count_consonants(string):
    """Count occurrences of consonant letters (case-insensitive)."""
    return calculate_char_occurrences_in_str("bcdfghjklmnpqrstvwxz", string)
def calculate_char_occurrences_in_str(characters, string):
    """Count how many times any character from *characters* occurs in *string*.

    The comparison is case-insensitive via casefold(); *characters* are
    expected to be lowercase.
    """
    folded = string.casefold()
    return sum(folded.count(char) for char in characters)
def prime_number(number):
    """Return True when *number* is prime.

    Fixes: the original returned True for 1 (``number >= 1``), which is not
    prime; divisors are now only checked up to sqrt(number) instead of
    number - 2.
    """
    if number < 2:
        return False
    for i in range(2, int(number ** 0.5) + 1):
        if number % i == 0:
            return False
    return True
def fact_digits(number):
    """Return the sum of the factorials of *number*'s decimal digits."""
    return sum(fact(digit) for digit in to_digits(number))
def fact(number):
    """Return number! (0! == 1; non-positive input also yields 1, matching the
    original loop)."""
    product = 1
    for factor in range(2, number + 1):
        product *= factor
    return product
def fibonacci(elements_num):
    """Return the first *elements_num* Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    sequence = []
    current, following = 1, 1
    for _ in range(elements_num):
        sequence.append(current)
        current, following = following, current + following
    return sequence
def fib_number(elements_num):
    """Return the integer formed by concatenating the first *elements_num*
    Fibonacci numbers (e.g. 4 -> 1123)."""
    return int("".join(str(value) for value in fibonacci(elements_num)))
def palindrome(input):
    """Return True when *input* (any value; compared via str()) reads the same
    forwards and backwards."""
    text = str(input)
    return text == text[::-1]
def char_histogram(string):
    """Map each character that appears in *string* to its occurrence count."""
    return {char: string.count(char) for char in string}
ibmsecurity/isam/base/network/felb/attributes/log.py | ibm-enio/ibmsecurity | 2 | 12769288 | import logging
logger = logging.getLogger(__name__)
# REST endpoint for the FELB logging configuration.
module_uri = "/isam/felb/configuration/logging"
# No module/version prerequisites apply to this endpoint.
requires_modules = None
requires_versions = None
def get(isamAppliance):
    """
    Retrieves logging configuration attributes

    :param isamAppliance: appliance connection object (provides invoke_get)
    :return: appliance response for the logging configuration endpoint
    """
    return isamAppliance.invoke_get("Retrieving logging configuration", module_uri)
def update(isamAppliance, local, remote_address, remote_port, remote_facility, check_mode=False, force=False):
    """
    Updates logging configuration when the desired state differs from the
    current one, or unconditionally when *force* is set.

    NOTE(review): check_mode is accepted for API symmetry but is not used
    here, matching the original implementation.
    """
    change_required, json_data = _check(isamAppliance, local, remote_address, remote_port, remote_facility)

    if not (force or change_required):
        return isamAppliance.create_return_object(changed=False)

    return isamAppliance.invoke_put("Updating Configuration", module_uri, json_data,
                                    requires_modules=requires_modules, requires_version=requires_versions)
def _check(isamAppliance, local, remote_address, remote_port, remote_facility):
    """
    Idempotency check: compare the desired logging configuration against the
    appliance's current one and return ``(change_required, payload)``.
    """
    current = get(isamAppliance)['data']

    if local is True:
        # Local logging ignores all remote settings; they are blanked out.
        payload = {
            "local": True,
            "remote_address": "",
            "remote_port": None,
            "remote_facility": None
        }
        return current['local'] != local, payload

    # Remote logging: compare the three remote fields.
    # NOTE(review): the current 'local' flag itself is not compared in this
    # branch -- behaviour preserved from the original.
    payload = {
        "local": local,
        "remote_address": remote_address,
        "remote_port": remote_port,
        "remote_facility": remote_facility
    }
    change_required = (current['remote_address'] != remote_address
                       or current['remote_port'] != remote_port
                       or current['remote_facility'] != remote_facility)
    return change_required, payload
| 2.421875 | 2 |
Assingments/module03/week04/monday/sulfaroa.inclass.py | tonysulfaro/MI-250 | 0 | 12769289 | <gh_stars>0
# stuff for something
| 1.085938 | 1 |
backend/apps/mapview/utils.py | match4healthcare/match4healthcare | 2 | 12769290 | <reponame>match4healthcare/match4healthcare<gh_stars>1-10
import json
from math import asin, cos, radians, sin, sqrt
from os.path import abspath, dirname, join
current_location = dirname(abspath(__file__))

# Load the postal-code table once at import time. Entries appear to be
# (longitude, latitude, place-name) tuples keyed by country code and postal
# code -- see the unpacking in get_plzs_close_to; confirm against the JSON.
with open(join(current_location, "files/plzs_merged.json")) as f:
    plzs = json.loads(f.read())
def haversine(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometres between two (lon, lat) points,
    given in degrees (Earth radius 6371 km)."""
    phi1, phi2 = radians(lat1), radians(lat2)
    dphi = phi2 - phi1
    dlmb = radians(lon2) - radians(lon1)
    h = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlmb / 2) ** 2
    return 2 * 6371 * asin(sqrt(h))
def get_plzs_close_to(countrycode, plz, distance_in_km):
    """Return postal codes (same country) within *distance_in_km* of *plz*;
    the reference code itself is included (distance 0)."""
    lon0, lat0, _ = plzs[countrycode][plz]
    return [
        candidate
        for candidate, (lon, lat, _place) in plzs[countrycode].items()
        if haversine(lon0, lat0, lon, lat) < distance_in_km
    ]
def get_plz_data(countrycode, plz):
    """Return latitude, longitude and city name for a postal code.

    Fix: entries in *plzs* are stored as (lon, lat, place) -- see the
    unpacking in get_plzs_close_to -- but this function previously unpacked
    them as (lat, lon, ...), swapping latitude and longitude in the result.
    """
    lon, lat, city = plzs[countrycode][plz]
    return {"latitude": lat, "longitude": lon, "city": city}
| 2.796875 | 3 |
pypy/iterator_generator_coroutine/first_of_all/3_generator_send_func.py | DowsonJones/test_test | 1 | 12769291 | <reponame>DowsonJones/test_test
def consumer():
    """Coroutine-style consumer.

    The priming ``next()`` yields '' first; each subsequent ``send(n)``
    prints the received item and yields '200 OK' back to the producer.
    (Commented-out dead code from the original was removed; comments
    translated to English.)
    """
    response = ''
    while True:
        # Yield the previous response; resume with the value sent in.
        item = yield response
        print('[CONSUMER] consuming %s' % item)
        response = '200 OK'
def produce(c):
    """Drive the already-primed consumer coroutine *c* with the integers
    1..5, printing each exchange, then close it."""
    for n in range(1, 6):
        print('[PRODUCE] producing %s' % n)
        r = c.send(n)
        print('[PRODUCE] consumer return %s' % r)
    c.close()
if __name__ == '__main__':
    c = consumer()
    # The generator must be started with next() (i.e. sent None) first:
    # "TypeError: can't send non-None value to a just-started generator"
    next(c)
    produce(c)
| 2.984375 | 3 |
tests/args/regex/test_args.py | benwhalley/pytest-bdd | 1 | 12769292 | """Step arguments tests."""
import functools
import re
from pytest_bdd import given, parsers, scenario, then, when
import pytest
from pytest_bdd import exceptions
# Bind each feature file once so the scenario decorators below can be
# declared with just the scenario name.
scenario_when = functools.partial(scenario, "../when_arguments.feature")
scenario_args = functools.partial(scenario, "../args_steps.feature")
@scenario_args("Every step takes a parameter with the same name")
def test_steps():
pass
@scenario_when("Argument in when, step 1")
def test_argument_in_when_step_1():
pass
@scenario_when("Argument in when, step 2")
def test_argument_in_when_step_2():
pass
def test_multiple_given(request):
    """Using the same given fixture raises an error."""

    @scenario_args("Using the same given fixture raises an error")
    def test():
        pass

    # Running the scenario must raise GivenAlreadyUsed because the feature
    # applies the same given fixture twice.
    with pytest.raises(exceptions.GivenAlreadyUsed):
        test(request)
@given(parsers.re(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_have(euro, values):
assert euro == values.pop(0)
@when(parsers.re(r"I pay (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_pay(euro, values, request):
assert euro == values.pop(0)
@then(parsers.re(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
def i_should_have(euro, values):
assert euro == values.pop(0)
@given(parsers.re(r"I have an argument (?P<arg>\d+)"))
def argument(arg):
"""I have an argument."""
return dict(arg=arg)
@when(parsers.re(r"I get argument (?P<arg>\d+)"))
def get_argument(argument, arg):
"""Getting argument."""
argument["arg"] = arg
@then(parsers.re(r"My argument should be (?P<arg>\d+)"))
def assert_that_my_argument_is_arg(argument, arg):
"""Assert that arg from when equals arg."""
assert argument["arg"] == arg
| 2.859375 | 3 |
heuristics.py | MasterOdin/Connect4-AI | 0 | 12769293 | """
AI Heuristics for various evaluation methods
"""
from __future__ import print_function
import random
__author__ = "Matthew 'MasterOdin' Peveler"
__license__ = "The MIT License (MIT)"
class Heuristic(object):
    """Abstract interface for column-evaluation heuristics.

    Concrete heuristics override both static methods.
    """

    @staticmethod
    def get_best_column(board):
        """Return the index of the best column to play on *board*.

        :param board: current game board
        :return: column index
        """
        raise NotImplementedError("Not yet implemented")

    @staticmethod
    def get_column_value(board, column):
        """Return a heuristic score for playing *column* on *board*.

        :param board: current game board
        :param column: column index to evaluate
        :return: heuristic value
        """
        raise NotImplementedError("Not yet implemented")
class Random(Heuristic):
    """
    Random heuristic: returns a uniformly random *valid* column.
    """
    NAME = "Random"

    def __init__(self):
        pass

    @staticmethod
    def get_best_column(board):
        """Return a random column in which a piece can legally be added.

        Fix: random.randint's upper bound is inclusive, so the original
        randint(0, board.COLUMNS) could draw the (presumably out-of-range)
        index board.COLUMNS and rely on can_add_piece to reject it;
        randrange(board.COLUMNS) draws 0..COLUMNS-1.
        """
        col = -1
        while not board.can_add_piece(col):
            col = random.randrange(board.COLUMNS)
        return col

    @staticmethod
    def get_column_value(board, column):
        """Unused for the random heuristic; no per-column scoring is done."""
        pass
class MinMax(Heuristic):
    """
    MinMax heuristic for AI agent

    NOTE: both methods are unimplemented stubs (they return None via ``pass``),
    unlike the base class, which raises NotImplementedError.
    """
    NAME = "MinMax"

    def __init__(self):
        pass

    @staticmethod
    def get_best_column(board):
        # TODO: implement minimax column selection.
        pass

    @staticmethod
    def get_column_value(board, column):
        # TODO: implement minimax column scoring.
        pass
| 3.515625 | 4 |
server/axeshome/util.py | kevinmcguinness/axes-home | 2 | 12769294 | #
# (c) Copyright 2015 <NAME>. All Rights Reserved.
#
"""
Various utilities.
"""
from flask.ext.restful import abort
from bson.objectid import ObjectId
from base64 import b64decode
from werkzeug.routing import BaseConverter
class AxesURIConverter(BaseConverter):
    """
    Flask routing converter for AXES URIs of the form::

        axes:/path

    For example:

        axes:/cAXES/v20080512_12...e_clips_investigates/s000000120

    The converter strips off the leading 'axes:' part to return the path
    """
    regex = r'axes:.*?'

    def to_python(self, value):
        # Drop the 5-character "axes:" scheme prefix.
        return value[len('axes:'):]
# Image MIME types accepted in data URLs, mapped to the file extension used
# when the decoded image is saved.
supported_mimetypes = {
    'image/png': '.png',
    'image/jpeg': '.jpg',
    'image/bmp': '.bmp',
    'image/gif': '.gif',
}
def find_or_404(collection, objectid):
    """Fetch the document with id *objectid* from *collection*, aborting the
    request with HTTP 404 when the id is malformed or no document exists.
    """
    try:
        objectid = ObjectId(objectid)
    # Fix: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; bson raises InvalidId (an Exception) for bad ids.
    except Exception:
        abort(404, message="invalid object id")
    item = collection.find_one(objectid)
    if not item:
        error = "resource with id {} does not exist".format(str(objectid))
        abort(404, message=error)
    return item
def clause_type(text):
    """Parse ``'type:value'`` into ``{'type': '#type', 'text': value}``.

    A ``#`` is prepended to the type when missing. Raises
    ``ValueError('Parse error')`` when *text* contains no ``:`` separator.
    Fix: the original used a bare ``except:``, which also converted
    unrelated errors (and SystemExit/KeyboardInterrupt) into parse errors.
    """
    try:
        kind, value = text.split(':', 1)
    except ValueError:
        raise ValueError('Parse error')

    # prepend hash if necessary
    if not kind.startswith('#'):
        kind = '#' + kind

    return {'type': kind, 'text': value}
def parse_data_url(data_url):
    """
    Parse a data url into a tuple of params and the encoded data.

    E.g.

    >>> data_url = "data:image/png;base64,ABC123xxx"
    >>> params, encoded_data = parse_data_url(data_url)
    >>> params
    ['image/png', 'base64']
    >>> encoded_data
    'ABC123xxx'

    Raises ValueError when *data_url* does not start with 'data:' or
    contains no comma.
    """
    # e.g. data:image/png;base64,xxx..
    if not data_url.startswith('data:'):
        raise ValueError('not a data url')
    data_url = data_url[5:]
    # Fix: split on the first comma only -- payloads that themselves contain
    # a ',' previously made the 2-way unpack raise ValueError.
    params, data = data_url.split(',', 1)
    params = params.split(';')
    return params, data
def get_image_data_and_extension_from_data_url(data_url):
    """
    Parse image data encoded in a data URL and return the decoded (raw) data
    and an appropriate file extension to use.

    Raises ValueError for malformed URLs, unsupported encodings, or
    unsupported MIME types.
    """
    params, encoded = parse_data_url(data_url)
    if len(params) < 2:
        raise ValueError('invalid data url: not enough params')

    mimetype, encoding = params[0], params[-1]
    if encoding != 'base64':
        raise ValueError('Unsupported encoding: {}'.format(encoding))
    if mimetype not in supported_mimetypes:
        raise ValueError('Unsupported mimetype: {}'.format(mimetype))

    return b64decode(encoded), supported_mimetypes[mimetype]
| 2.46875 | 2 |
Libs/priors.py | VahidHeidari/StrBEAM | 0 | 12769295 |
NUM_ALLELES = 3
ALLELE_PRIOR = 1.0 / NUM_ALLELES  # uniform prior over the three alleles

# Concentration/scale hyper-parameter -- exact role is defined by the code
# that imports this module; TODO confirm against the sampler.
ALPHA = 1.5

# Large negative stand-in for -inf in log-probability arithmetic.
MINUS_INFINITE = -1000000000

mBound = 6

# Model hyper-parameters; semantics of jA/jB and pA/pD/pC/pI are determined
# by the importing code -- NOTE(review): confirm meanings against the model.
jA = 10.0
jB = 0.5

pA = 0.1
pD = 0.1
pC = 1.0 - pD  # complement of pD
pI = 0.1

if __name__ == '__main__':
    # This module only defines constants; running it directly does nothing.
    print('This is a configuration file!')
| 1.679688 | 2 |
lesson_7/ultralight.py | windn19/Python_Developer | 1 | 12769296 | <gh_stars>1-10
from docx.shared import Cm # импорт из библиотеки класса для обработки изображений
from docxtpl import DocxTemplate, InlineImage
# импорт из библиотеки классов генерации doc документов из шаблонов и включения в них изображений
def get_context(result_sku_list):
    """Return the mapping of template placeholders to report values.

    result_sku_list: sequence ordered as (brand, model, supplier, price);
    only the first four items are used.
    """
    return {
        key: result_sku_list[index]
        for index, key in enumerate(('brand', 'model', 'sup', 'price'))
    }
def from_template(result_sku_list, template, signature):
    """
    Create the document and embed the image into it.
    result_sku_list: data to be placed into the document
    template: docx template file
    signature: image file for the document
    """
    template = DocxTemplate(template)  # build the template document object
    context = get_context(result_sku_list)  # obtain the data as a dict
    img_size = Cm(8)  # scale the image to the required size (8 cm)
    acc = InlineImage(template, signature, img_size)  # inline-image object for insertion into the document
    context['acc'] = acc  # add the image object to the data dict
    template.render(context)  # render the template with the data dict
    template.save('rep12.docx')  # save the result to rep12.docx
def generate_report(result_sku_list):
    """
    Create a doc document for the given data.
    result_sku_list: data to be placed into the document
    """
    template = 'repor1.docx'  # document template file
    signature = 'IiGd2vv6-Cc.jpg'  # image to insert into the template
    from_template(result_sku_list, template, signature)  # build the document from the template
with open('exp.txt', mode='r') as f:  # open the data file for reading
    data = f.read()  # read the whole file as one string

generate_report(data.split(';'))
# build the document, splitting the received data on ";" into a list
| 2.234375 | 2 |
stonesoup/updater/tests/test_ensemble.py | 0sm1um/Stone-Soup | 1 | 12769297 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Test for updater.ensemble module"""
import numpy as np
import datetime
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.types.detection import Detection
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.prediction import EnsembleStatePrediction
from stonesoup.types.state import EnsembleState
from stonesoup.updater.ensemble import EnsembleUpdater
def test_ensemble():
    """Smoke-test EnsembleUpdater.update: the updated state must keep the
    timestamp/hypothesis, preserve dimensions and vector count, and have a
    consistent square-root covariance factor."""
    # Initialize variables
    measurement_model = LinearGaussian(ndim_state=2, mapping=[0],
                                       noise_covar=np.array([[0.04]]))
    timestamp = datetime.datetime(2021, 3, 5, 22, 3, 17)
    num_vectors = 100
    # Random ensemble drawn around a fixed mean/covariance.
    test_ensemble = EnsembleState.generate_ensemble(
        np.array([[-6.45], [0.7]]),
        np.array([[4.1123, 0.0013],
                  [0.0013, 0.0365]]), num_vectors)

    # Create Prediction, Measurement, and Updater
    prediction = EnsembleStatePrediction(test_ensemble,
                                         timestamp=timestamp)
    measurement = Detection(np.array([[-6.23]]), timestamp)
    updater = EnsembleUpdater(measurement_model)
    # Construct hypothesis
    hypothesis = SingleHypothesis(prediction=prediction,
                                  measurement=measurement)

    # Run updater
    updated_state = updater.update(hypothesis)

    # Metadata and shape invariants of the updated state.
    assert updated_state.timestamp == timestamp
    assert updated_state.hypothesis.prediction == prediction
    assert updated_state.hypothesis.measurement == measurement
    assert updated_state.ndim == updated_state.hypothesis.prediction.ndim
    assert updated_state.num_vectors == \
        updated_state.hypothesis.prediction.num_vectors
    # sqrt_covar must be a valid square-root factor of covar.
    assert np.allclose(updated_state.sqrt_covar @ updated_state.sqrt_covar.T,
                       updated_state.covar)

    # Test updater runs with measurement prediction already in hypothesis.
    test_measurement_prediction = updater.predict_measurement(prediction)
    hypothesis = SingleHypothesis(prediction=prediction,
                                  measurement=measurement,
                                  measurement_prediction=test_measurement_prediction)
    updated_state = updater.update(hypothesis)
    assert updated_state.timestamp == timestamp
    assert updated_state.hypothesis.prediction == prediction
    assert updated_state.hypothesis.measurement == measurement
    assert updated_state.ndim == updated_state.hypothesis.prediction.ndim
    assert updated_state.num_vectors == \
        updated_state.hypothesis.prediction.num_vectors
    assert np.allclose(updated_state.sqrt_covar @ updated_state.sqrt_covar.T,
                       updated_state.covar)
| 2.265625 | 2 |
sdk/python/pulumi_oci/blockchain/get_blockchain_platform.py | EladGabay/pulumi-oci | 5 | 12769298 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetBlockchainPlatformResult',
'AwaitableGetBlockchainPlatformResult',
'get_blockchain_platform',
]
@pulumi.output_type
class GetBlockchainPlatformResult:
"""
A collection of values returned by getBlockchainPlatform.
"""
def __init__(__self__, blockchain_platform_id=None, ca_cert_archive_text=None, compartment_id=None, component_details=None, compute_shape=None, defined_tags=None, description=None, display_name=None, federated_user_id=None, freeform_tags=None, host_ocpu_utilization_infos=None, id=None, idcs_access_token=None, is_byol=None, is_multi_ad=None, lifecycle_details=None, load_balancer_shape=None, platform_role=None, platform_shape_type=None, replicas=None, service_endpoint=None, service_version=None, state=None, storage_size_in_tbs=None, storage_used_in_tbs=None, time_created=None, time_updated=None, total_ocpu_capacity=None):
if blockchain_platform_id and not isinstance(blockchain_platform_id, str):
raise TypeError("Expected argument 'blockchain_platform_id' to be a str")
pulumi.set(__self__, "blockchain_platform_id", blockchain_platform_id)
if ca_cert_archive_text and not isinstance(ca_cert_archive_text, str):
raise TypeError("Expected argument 'ca_cert_archive_text' to be a str")
pulumi.set(__self__, "ca_cert_archive_text", ca_cert_archive_text)
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if component_details and not isinstance(component_details, dict):
raise TypeError("Expected argument 'component_details' to be a dict")
pulumi.set(__self__, "component_details", component_details)
if compute_shape and not isinstance(compute_shape, str):
raise TypeError("Expected argument 'compute_shape' to be a str")
pulumi.set(__self__, "compute_shape", compute_shape)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if federated_user_id and not isinstance(federated_user_id, str):
raise TypeError("Expected argument 'federated_user_id' to be a str")
pulumi.set(__self__, "federated_user_id", federated_user_id)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if host_ocpu_utilization_infos and not isinstance(host_ocpu_utilization_infos, list):
raise TypeError("Expected argument 'host_ocpu_utilization_infos' to be a list")
pulumi.set(__self__, "host_ocpu_utilization_infos", host_ocpu_utilization_infos)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idcs_access_token and not isinstance(idcs_access_token, str):
raise TypeError("Expected argument 'idcs_access_token' to be a str")
pulumi.set(__self__, "idcs_access_token", idcs_access_token)
if is_byol and not isinstance(is_byol, bool):
raise TypeError("Expected argument 'is_byol' to be a bool")
pulumi.set(__self__, "is_byol", is_byol)
if is_multi_ad and not isinstance(is_multi_ad, bool):
raise TypeError("Expected argument 'is_multi_ad' to be a bool")
pulumi.set(__self__, "is_multi_ad", is_multi_ad)
if lifecycle_details and not isinstance(lifecycle_details, str):
raise TypeError("Expected argument 'lifecycle_details' to be a str")
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if load_balancer_shape and not isinstance(load_balancer_shape, str):
raise TypeError("Expected argument 'load_balancer_shape' to be a str")
pulumi.set(__self__, "load_balancer_shape", load_balancer_shape)
if platform_role and not isinstance(platform_role, str):
raise TypeError("Expected argument 'platform_role' to be a str")
pulumi.set(__self__, "platform_role", platform_role)
if platform_shape_type and not isinstance(platform_shape_type, str):
raise TypeError("Expected argument 'platform_shape_type' to be a str")
pulumi.set(__self__, "platform_shape_type", platform_shape_type)
if replicas and not isinstance(replicas, dict):
raise TypeError("Expected argument 'replicas' to be a dict")
pulumi.set(__self__, "replicas", replicas)
if service_endpoint and not isinstance(service_endpoint, str):
raise TypeError("Expected argument 'service_endpoint' to be a str")
pulumi.set(__self__, "service_endpoint", service_endpoint)
if service_version and not isinstance(service_version, str):
raise TypeError("Expected argument 'service_version' to be a str")
pulumi.set(__self__, "service_version", service_version)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if storage_size_in_tbs and not isinstance(storage_size_in_tbs, float):
raise TypeError("Expected argument 'storage_size_in_tbs' to be a float")
pulumi.set(__self__, "storage_size_in_tbs", storage_size_in_tbs)
if storage_used_in_tbs and not isinstance(storage_used_in_tbs, float):
raise TypeError("Expected argument 'storage_used_in_tbs' to be a float")
pulumi.set(__self__, "storage_used_in_tbs", storage_used_in_tbs)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
if total_ocpu_capacity and not isinstance(total_ocpu_capacity, int):
raise TypeError("Expected argument 'total_ocpu_capacity' to be a int")
pulumi.set(__self__, "total_ocpu_capacity", total_ocpu_capacity)
    @property
    @pulumi.getter(name="blockchainPlatformId")
    def blockchain_platform_id(self) -> str:
        """
        Unique service identifier (the id this data source was queried with).
        """
        return pulumi.get(self, "blockchain_platform_id")
    @property
    @pulumi.getter(name="caCertArchiveText")
    def ca_cert_archive_text(self) -> str:
        # NOTE(review): presumably the text of the CA certificate archive;
        # the upstream generator emitted no description -- confirm against
        # the OCI Blockchain API reference.
        return pulumi.get(self, "ca_cert_archive_text")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
Compartment Identifier
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="componentDetails")
def component_details(self) -> 'outputs.GetBlockchainPlatformComponentDetailsResult':
"""
Blockchain Platform component details.
"""
return pulumi.get(self, "component_details")
@property
@pulumi.getter(name="computeShape")
def compute_shape(self) -> str:
"""
Compute shape - STANDARD or ENTERPRISE_SMALL or ENTERPRISE_MEDIUM or ENTERPRISE_LARGE or ENTERPRISE_EXTRA_LARGE or ENTERPRISE_CUSTOM
"""
return pulumi.get(self, "compute_shape")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
Platform Instance Description
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Platform Instance Display name, can be renamed
"""
return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="federatedUserId")
    def federated_user_id(self) -> str:
        # NOTE(review): no description emitted by the generator; name
        # suggests an identity-federation user OCID -- confirm upstream.
        return pulumi.get(self, "federated_user_id")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="hostOcpuUtilizationInfos")
def host_ocpu_utilization_infos(self) -> Sequence['outputs.GetBlockchainPlatformHostOcpuUtilizationInfoResult']:
"""
List of OcpuUtilization for all hosts
"""
return pulumi.get(self, "host_ocpu_utilization_infos")
@property
@pulumi.getter
def id(self) -> str:
"""
unique identifier that is immutable on creation
"""
return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="idcsAccessToken")
    def idcs_access_token(self) -> str:
        # NOTE(review): no description emitted by the generator; name
        # suggests an IDCS access token used at creation -- confirm upstream.
        return pulumi.get(self, "idcs_access_token")
@property
@pulumi.getter(name="isByol")
def is_byol(self) -> bool:
"""
Bring your own license
"""
return pulumi.get(self, "is_byol")
@property
@pulumi.getter(name="isMultiAd")
def is_multi_ad(self) -> bool:
"""
True for multi-AD blockchain plaforms, false for single-AD
"""
return pulumi.get(self, "is_multi_ad")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
An message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="loadBalancerShape")
def load_balancer_shape(self) -> str:
"""
Type of Load Balancer shape - LB_100_MBPS or LB_400_MBPS. Default is LB_100_MBPS.
"""
return pulumi.get(self, "load_balancer_shape")
@property
@pulumi.getter(name="platformRole")
def platform_role(self) -> str:
"""
Role of platform - FOUNDER or PARTICIPANT
"""
return pulumi.get(self, "platform_role")
@property
@pulumi.getter(name="platformShapeType")
def platform_shape_type(self) -> str:
"""
Type of Platform shape - DEFAULT or CUSTOM
"""
return pulumi.get(self, "platform_shape_type")
@property
@pulumi.getter
def replicas(self) -> 'outputs.GetBlockchainPlatformReplicasResult':
"""
Number of replicas of service components like Rest Proxy, CA and Console
"""
return pulumi.get(self, "replicas")
@property
@pulumi.getter(name="serviceEndpoint")
def service_endpoint(self) -> str:
"""
Service endpoint URL, valid post-provisioning
"""
return pulumi.get(self, "service_endpoint")
@property
@pulumi.getter(name="serviceVersion")
def service_version(self) -> str:
"""
The version of the Platform Instance.
"""
return pulumi.get(self, "service_version")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the Platform Instance.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageSizeInTbs")
def storage_size_in_tbs(self) -> float:
"""
Storage size in TBs
"""
return pulumi.get(self, "storage_size_in_tbs")
@property
@pulumi.getter(name="storageUsedInTbs")
def storage_used_in_tbs(self) -> float:
"""
Storage used in TBs
"""
return pulumi.get(self, "storage_used_in_tbs")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The time the the Platform Instance was created. An RFC3339 formatted datetime string
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The time the Platform Instance was updated. An RFC3339 formatted datetime string
"""
return pulumi.get(self, "time_updated")
@property
@pulumi.getter(name="totalOcpuCapacity")
def total_ocpu_capacity(self) -> int:
"""
Number of total OCPUs allocated to the platform cluster
"""
return pulumi.get(self, "total_ocpu_capacity")
class AwaitableGetBlockchainPlatformResult(GetBlockchainPlatformResult):
    """Awaitable wrapper so the lookup result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # ``__await__`` must return a generator; the never-taken ``yield``
        # makes this method one without ever actually yielding.
        if False:
            yield self
        # Resolve immediately to a plain (non-awaitable) result object.
        return GetBlockchainPlatformResult(
            blockchain_platform_id=self.blockchain_platform_id,
            ca_cert_archive_text=self.ca_cert_archive_text,
            compartment_id=self.compartment_id,
            component_details=self.component_details,
            compute_shape=self.compute_shape,
            defined_tags=self.defined_tags,
            description=self.description,
            display_name=self.display_name,
            federated_user_id=self.federated_user_id,
            freeform_tags=self.freeform_tags,
            host_ocpu_utilization_infos=self.host_ocpu_utilization_infos,
            id=self.id,
            idcs_access_token=self.idcs_access_token,
            is_byol=self.is_byol,
            is_multi_ad=self.is_multi_ad,
            lifecycle_details=self.lifecycle_details,
            load_balancer_shape=self.load_balancer_shape,
            platform_role=self.platform_role,
            platform_shape_type=self.platform_shape_type,
            replicas=self.replicas,
            service_endpoint=self.service_endpoint,
            service_version=self.service_version,
            state=self.state,
            storage_size_in_tbs=self.storage_size_in_tbs,
            storage_used_in_tbs=self.storage_used_in_tbs,
            time_created=self.time_created,
            time_updated=self.time_updated,
            total_ocpu_capacity=self.total_ocpu_capacity)
def get_blockchain_platform(blockchain_platform_id: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBlockchainPlatformResult:
    """
    This data source provides details about a specific Blockchain Platform resource in Oracle Cloud Infrastructure Blockchain service.

    Gets information about a Blockchain Platform identified by the specific id

    ## Example Usage

    ```python
    import pulumi
    import pulumi_oci as oci

    test_blockchain_platform = oci.blockchain.get_blockchain_platform(blockchain_platform_id=oci_blockchain_blockchain_platform["test_blockchain_platform"]["id"])
    ```

    :param str blockchain_platform_id: Unique service identifier.
    :param opts: invoke options; version defaults to this provider package's version.
    """
    __args__ = dict()
    __args__['blockchainPlatformId'] = blockchain_platform_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke against the provider; .value is the raw result dict
    # deserialized into GetBlockchainPlatformResult.
    __ret__ = pulumi.runtime.invoke('oci:blockchain/getBlockchainPlatform:getBlockchainPlatform', __args__, opts=opts, typ=GetBlockchainPlatformResult).value

    return AwaitableGetBlockchainPlatformResult(
        blockchain_platform_id=__ret__.blockchain_platform_id,
        ca_cert_archive_text=__ret__.ca_cert_archive_text,
        compartment_id=__ret__.compartment_id,
        component_details=__ret__.component_details,
        compute_shape=__ret__.compute_shape,
        defined_tags=__ret__.defined_tags,
        description=__ret__.description,
        display_name=__ret__.display_name,
        federated_user_id=__ret__.federated_user_id,
        freeform_tags=__ret__.freeform_tags,
        host_ocpu_utilization_infos=__ret__.host_ocpu_utilization_infos,
        id=__ret__.id,
        idcs_access_token=__ret__.idcs_access_token,
        is_byol=__ret__.is_byol,
        is_multi_ad=__ret__.is_multi_ad,
        lifecycle_details=__ret__.lifecycle_details,
        load_balancer_shape=__ret__.load_balancer_shape,
        platform_role=__ret__.platform_role,
        platform_shape_type=__ret__.platform_shape_type,
        replicas=__ret__.replicas,
        service_endpoint=__ret__.service_endpoint,
        service_version=__ret__.service_version,
        state=__ret__.state,
        storage_size_in_tbs=__ret__.storage_size_in_tbs,
        storage_used_in_tbs=__ret__.storage_used_in_tbs,
        time_created=__ret__.time_created,
        time_updated=__ret__.time_updated,
        total_ocpu_capacity=__ret__.total_ocpu_capacity)
| 1.703125 | 2 |
etl/exporterWorker.py | MaastrichtUniversity/irods-open-access-repo | 2 | 12769299 | <gh_stars>1-10
#!/usr/bin/env python3
import os
import sys
import signal
import time
import logging
import pika
import json
from etl.dataverseManager.irods2Dataverse import DataverseExporter
from etl.irodsManager.irodsClient import irodsClient
# Log verbosity comes from the environment (e.g. "INFO", "DEBUG");
# a missing LOG_LEVEL raises KeyError at import time.
log_level = os.environ['LOG_LEVEL']
logging.basicConfig(level=logging.getLevelName(log_level), format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger('root')
def extend_folder_path(session, selected_list):
    """
    Expand every folder in the comma-tab separated selection into the paths
    of all files it (recursively) contains.

    Each entry of *selected_list* is relative to ``/nlmumc/projects/``.
    Entries that name an iRODS collection are replaced by the relative paths
    of every file found while walking that collection; plain file entries
    are kept unchanged.
    """
    prefix = "/nlmumc/projects/"
    expanded = []
    for relative_path in selected_list.split(",\t"):
        full_path = prefix + relative_path
        if not session.collections.exists(full_path):
            # A single data object: keep the original relative path.
            expanded.append(relative_path)
            continue
        # A collection: gather every file reached while walking it.
        root = session.collections.get(full_path)
        for _collection, _sub_collections, data_objects in root.walk():
            expanded.extend(obj.path.replace(prefix, "") for obj in data_objects)
    return expanded
def collection_etl(ch, method, properties, body):
    """
    RabbitMQ consumer callback: export one project collection.

    Parses the JSON message body, connects to iRODS, builds the exporter
    matching the requested repository and runs the export.  The message is
    acknowledged only after the handler ran, so a body that fails to parse
    leaves the message unacknowledged.
    """
    try:
        data = json.loads(body.decode("utf-8"))
        # Lazy %-formatting: the repr is only built if INFO is enabled.
        logger.info(" [x] Received %r", data)
    except (UnicodeDecodeError, ValueError):
        # json.JSONDecodeError is a subclass of ValueError.  The original
        # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        logger.error("Failed body message parsing")
    else:
        path = "/nlmumc/projects/" + data['project'] + "/" + data['collection']
        irods_client = irodsClient(host=os.environ['IRODS_HOST'], port=1247, user=os.environ['IRODS_USER'],
                                   password=os.environ['IRODS_PASS'], zone='nlmumc')
        irods_client.prepare(path, data['repository'])
        logger.info(" [x] Create %s exporter worker", data['repository'])
        exporter = None
        if data['repository'] == "Dataverse":
            exporter = DataverseExporter()
        if exporter is not None:
            # Folders in the restriction list are expanded to their files.
            data['restrict_list'] = extend_folder_path(irods_client.session, data['restrict_list'])
            exporter.init_export(irods_client, data)
        ch.basic_ack(delivery_tag=method.delivery_tag)
        logger.info(" [x] Sent projectCollection.exporter.executed")
        return True
def main(channel, retry_counter=0):
    """
    Bind the ETL queue on *channel* and consume messages forever.

    On a closed connection the function sleeps 60 s, opens a fresh
    connection from the module-level ``parameters`` and recurses, giving up
    after 10 attempts.

    :param channel: an open pika channel
    :param retry_counter: number of reconnection attempts made so far.
        Defaults to 0 -- the previous default of ``None`` made
        ``retry_counter < 10`` raise TypeError when the argument was omitted.
    """
    channel.queue_declare(queue='repository.collection-etl', durable=True)
    channel.queue_bind(
        exchange='datahub.events_tx',
        queue='repository.collection-etl',
        routing_key='projectCollection.exporter.requested'
    )
    channel.basic_consume(
        collection_etl,
        queue='repository.collection-etl',
    )

    # When connection closed, try again 10 times otherwise quit.
    if retry_counter < 10:
        retry_counter += 1
    else:
        logger.error("Retry connection failed for 10 minutes. Exiting!")
        exit(1)
    try:
        logger.info(' [x] Waiting for queue repository.collection-etl')
        channel.start_consuming()
    except pika.exceptions.ConnectionClosed:
        logger.error(
            "Failed with pika.exceptions.ConnectionClosed: Sleeping for 60 secs before next try."
            + " This was try " + str(retry_counter))
        time.sleep(60)
        new_connection = pika.BlockingConnection(parameters)
        new_ch = new_connection.channel()
        main(new_ch, retry_counter)
def sigterm_handler(signum=None, frame=None):
    """Exit cleanly when SIGTERM is delivered.

    ``signal.signal`` invokes handlers with ``(signum, frame)``; the
    original zero-argument definition raised TypeError whenever the signal
    actually fired.  Defaults keep any existing zero-argument call working.
    """
    sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGTERM, sigterm_handler)
credentials = pika.PlainCredentials(os.environ['RABBITMQ_USER'], os.environ['RABBITMQ_PASS'])
parameters = pika.ConnectionParameters(host=os.environ['RABBITMQ_HOST'],
port=5672,
virtual_host='/',
credentials=credentials,
heartbeat=600,
blocked_connection_timeout=300)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
try:
sys.exit(main(channel, retry_counter=0))
finally:
connection.close()
logger.info("Exiting")
| 2.25 | 2 |
atlassian/__init__.py | approximatenumber/atlassian-python-api | 0 | 12769300 | <reponame>approximatenumber/atlassian-python-api<gh_stars>0
from .confluence import Confluence
from .jira import Jira
from .bitbucket import Bitbucket
from .bitbucket import Bitbucket as Stash
from .portfolio import Portfolio
from .bamboo import Bamboo
from .crowd import Crowd
from .service_desk import ServiceDesk
__all__ = ['Confluence', 'Jira', 'Bitbucket', 'Portfolio', 'Bamboo', 'Stash', 'Crowd', 'ServiceDesk']
| 1.265625 | 1 |
tests/test_pyfuncs.py | aspuru-guzik-group/funsies | 28 | 12769301 | """Test of Funsies python functions capabilities."""
# std
from io import BytesIO
from typing import Dict
# funsies
from funsies import _pyfunc as p
from funsies._constants import Encoding
def capitalize(inputs: Dict[str, bytes]) -> Dict[str, bytes]:
    """Return a copy of *inputs* with every value upper-cased (still bytes)."""
    return {name: data.decode().upper().encode() for name, data in inputs.items()}
def capitalize2(inputs: Dict[str, bytes]) -> Dict[str, str]:
    """Return a copy of *inputs* with every value decoded and upper-cased."""
    return {name: data.decode().upper() for name, data in inputs.items()}
def test_fun_wrap() -> None:
    """Test the instantiation of a Funsie class."""
    # python_funsie wraps a plain callable plus its input/output encodings
    # into a funsie description; it should never return None.
    out = p.python_funsie(
        capitalize, inputs={"in": Encoding.blob}, outputs={"in": Encoding.blob}
    )
    assert out is not None
def test_fun_run() -> None:
    """Test running python function."""
    cmd = p.python_funsie(
        capitalize, inputs={"in": Encoding.blob}, outputs={"in": Encoding.blob}
    )
    # Inputs are supplied as streams; blob outputs come back as raw bytes.
    inp = {"in": BytesIO(b"bla bla bla")}
    out = p.run_python_funsie(cmd, inp)
    assert out["in"] == b"BLA BLA BLA"
def test_fun_run_json() -> None:
    """Test running python function that outputs a JSON."""
    cmd = p.python_funsie(
        capitalize2, inputs={"in": Encoding.blob}, outputs={"in": Encoding.json}
    )
    inp = {"in": BytesIO(b"bla bla bla")}
    out = p.run_python_funsie(cmd, inp)
    # With a JSON output encoding the value stays a str, not bytes.
    assert out["in"] == "BLA BLA BLA"
| 3.09375 | 3 |
super function.py | blulady/python | 0 | 12769302 | <gh_stars>0
class A:
    """Base class exposing two demo features that print a message."""
    def feature1(self):
        # Side effect only: writes to stdout, returns None.
        print("Feature 1 working")
    def feature2(self):
        print("Feature 2 working")
# Demo: instantiate the base class and call one of its features.
a1 = A()
a1.feature1()
class B(A):
    """Subclass adding two more features; inherits feature1/feature2 from A.

    NOTE(review): despite the file name, ``super()`` is never used here --
    B simply inherits A's methods.
    """
    def feature3(self):
        print("feature 3 working")
    def feature4(self):
        print("feature 4 working")
# feature2 is inherited from A even though B does not define it.
b1 = B()
b1.feature2()
| 3 | 3 |
_multiselect_combobox_v2.py | iLLiCiTiT/PyQtMultiselectionCombobox | 0 | 12769303 | <filename>_multiselect_combobox_v2.py<gh_stars>0
import sys
from Qt import QtWidgets, QtCore, QtGui
class ViewFilter(QtCore.QSortFilterProxyModel):
    """Proxy model that only accepts rows whose check state is Checked."""

    def filterAcceptsRow(self, source_row, parent_index):
        # The original used ``parent_index.child(source_row, 0)``:
        # QModelIndex.child() is obsolete and, for top-level rows (whose
        # parent index is invalid), returns an invalid index so data()
        # yields None and every row is rejected.  Ask the source model
        # for the index instead.
        index = self.sourceModel().index(source_row, 0, parent_index)
        checkstate = index.data(QtCore.Qt.CheckStateRole)
        return checkstate == QtCore.Qt.Checked
class MultiselectionCombobox(QtWidgets.QFrame):
    """Combobox-like widget showing its checked items in a horizontal list.

    A QStandardItemModel holds one checkable item per entry; a ViewFilter
    proxy limits the embedded QListView to checked items.  An arrow button
    pops up a menu of all items.
    """

    def __init__(self, parent=None):
        super(MultiselectionCombobox, self).__init__(parent)

        model = QtGui.QStandardItemModel()

        view_filter = ViewFilter()
        view_filter.setSourceModel(model)

        view = QtWidgets.QListView(self)
        view.setModel(view_filter)
        # Flow items left-to-right and re-layout on resize.
        view.setResizeMode(QtWidgets.QListView.Adjust)
        view.setFlow(QtWidgets.QListView.LeftToRight)
        view.setLayoutMode(QtWidgets.QListView.Batched)

        arrow_btn = QtWidgets.QPushButton("E", self)

        layout = QtWidgets.QHBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(view)
        layout.addWidget(arrow_btn)

        arrow_btn.clicked.connect(self._on_arrow_click)

        self.view = view
        self.model = model
        self.arrow_btn = arrow_btn

    def addItem(self, icon=None, text=None, userData=None):
        """Append one checkable item (mirrors QComboBox.addItem)."""
        item = QtGui.QStandardItem()
        item.setData(text, QtCore.Qt.DisplayRole)
        item.setData(icon, QtCore.Qt.DecorationRole)
        item.setData(userData, QtCore.Qt.UserRole)
        # New items start unchecked, so ViewFilter hides them.
        item.setData(QtCore.Qt.Unchecked, QtCore.Qt.CheckStateRole)
        self.model.appendRow(item)

    def addItems(self, texts):
        """Append one unchecked item per string in *texts*."""
        new_items = []
        for text in texts:
            item = QtGui.QStandardItem()
            item.setData(text, QtCore.Qt.DisplayRole)
            item.setData(QtCore.Qt.Unchecked, QtCore.Qt.CheckStateRole)
            new_items.append(item)
        self.model.invisibleRootItem().appendRows(new_items)

    def _on_arrow_click(self):
        """Pop up a menu listing every model item at the cursor position."""
        menu = QtWidgets.QMenu()
        for row in range(self.model.rowCount()):
            index = self.model.index(row, 0)
            text = index.data(QtCore.Qt.DisplayRole)
            action = QtWidgets.QAction(text, menu)
            menu.addAction(action)

        result = menu.exec_(QtGui.QCursor.pos())
        # NOTE(review): debug print left in -- the chosen action is not yet
        # applied back to the model's check states.
        print(result)
class MainWindow(QtWidgets.QWidget):
    """Demo window comparing a plain QComboBox with MultiselectionCombobox."""

    def __init__(self, parent=None):
        super().__init__(parent)

        texts = ["one", "two"]

        combobox = QtWidgets.QComboBox()
        # combobox.setMinimumHeight(100)
        multiselect_combobox = MultiselectionCombobox(self)

        # Both widgets get the same entries for a side-by-side comparison.
        combobox.addItems(texts)
        multiselect_combobox.addItems(texts)

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(combobox)
        layout.addWidget(multiselect_combobox)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| 2.515625 | 3 |
archive/Changesets/mbf-76446/Source/Tools/Python/BioIronPython/Web.py | jdm7dv/Microsoft-Biology-Foundation | 0 | 12769304 | <reponame>jdm7dv/Microsoft-Biology-Foundation
# Copyright Microsoft Corporation. All rights reserved.
import Util
Util.add_biodotnet_reference("Bio")
Util.add_biodotnet_reference("Bio.WebServiceHandlers")
from Bio.Web import *
from Bio.Web.Blast import *
from System.IO import *
# Module-level NCBI BLAST handler shared by all functions below; the
# browser proxy setting is applied once at import time.
_service = NCBIBlastHandler()
_config_params = ConfigParameters()
_config_params.UseBrowserProxy = 1
_service.Configuration = _config_params
def submit_blast_search(seq):
    """Submit a BLAST search for the given sequence.

    Returns a job ID to use in polling the service.
    """
    search_params = BlastParameters()
    search_params.Add("Program", "blastn")
    search_params.Add("Database", "nr")
    search_params.Add("Expect", "1e-10")
    search_params.Add("CompositionBasedStatistics", "0")
    job_id = _service.SubmitRequest(seq, search_params)
    status = _service.GetRequestStatus(job_id)
    # The original condition used '&', which binds tighter than '!=' and
    # turned the test into a chained comparison against a bitwise AND of
    # the two enum values; a plain logical 'and' is what was intended.
    if status.Status != ServiceRequestStatus.Waiting and status.Status != ServiceRequestStatus.Ready:
        # Call-form raise works in both Python 2 (IronPython) and 3, unlike
        # the old 'raise Exception, msg' statement form.
        raise Exception("Unexpected BLAST service request status: " + status.Status.ToString())
    return job_id
def poll_blast_results(job_id):
    """Fetch the BLAST results for the given job ID.

    Returns an XML string containing the BlastResult.
    """
    # GetResult requires the search parameters again; they are rebuilt to
    # match those used in submit_blast_search.
    _search_params = BlastParameters()
    _search_params.Add("Program", "blastn")
    _search_params.Add("Database", "nr")
    _search_params.Add("Expect", "1e-10")
    _search_params.Add("CompositionBasedStatistics", "0")
    return _service.GetResult(job_id, _search_params)
def parse_blast_results(result_string):
    """Parse the given BLAST result XML.

    Returns a list of BlastResult objects.
    """
    # Wrap the string in a .NET StringReader as required by the parser.
    reader = StringReader(result_string)
    return _service.Parser.Parse(reader)
python-package/lets_plot/__init__.py | OLarionova-HORIS/lets-plot | 0 | 12769305 | <filename>python-package/lets_plot/__init__.py<gh_stars>0
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
from pkgutil import extend_path
from typing import Dict
# To handle the situation when 'datalore' package is shared my modules in different locations.
__path__ = extend_path(__path__, __name__)
from ._version import __version__
from ._global_settings import _settings, is_production
from .plot import *
from .export import *
from .frontend_context import *
from .settings_utils import *
__all__ = (plot.__all__ +
bistro.__all__ +
frontend_context.__all__ +
settings_utils.__all__ +
export.__all__ +
['LetsPlot'])
from .frontend_context import _configuration as cfg
class LetsPlot:
    """Public entry point for configuring Lets-Plot output and settings."""

    @classmethod
    def setup_html(cls, isolated_frame: bool = None, offline: bool = None, show_status: bool = False) -> None:
        """
        Configures Lets-Plot HTML output.

        Depending on the usage LetsPlot generates different HTML to show plots.
        In most cases LetsPlot will detect the type of the environment automatically.
        Auto-detection can be overridden using this method's parameters.

        Parameters
        ----------
        isolated_frame : bool, optional, default None - auto-detect
            If `True`, generate HTML which can be used in an `iframe` or in a standalone HTML document.
            If `False`, pre-load the Lets-Plot JS library. Notebook cell output will only consist of HTML for the plot rendering.
        offline : bool, optional, default None - evaluated to 'connected' mode in production environment.
            If `True`, the full Lets-Plot JS bundle will be added to the notebook. Use this option if you would like
            to work with the notebook without an Internet connection.
            If `False`, load the Lets-Plot JS library from CDN.
        show_status : bool, optional, default False
            Whether to show the status of loading of the Lets-Plot JS library.
            Only applicable when the Lets-Plot JS library is preloaded.
        """
        if not (isinstance(isolated_frame, bool) or isolated_frame is None):
            # Name the actual parameter in the message (was "'isolated'").
            raise ValueError("'isolated_frame' argument is not boolean: {}".format(type(isolated_frame)))
        if not (isinstance(offline, bool) or offline is None):
            raise ValueError("'offline' argument is not boolean: {}".format(type(offline)))

        cfg._setup_html_context(isolated_frame, offline, show_status)

    @classmethod
    def set(cls, settings: Dict):
        """Update global settings; keys are prefixed with 'dev_' outside production."""
        if is_production():
            _settings.update(settings)
        else:
            _settings.update({'dev_' + key: value for key, value in settings.items()})
| 2.109375 | 2 |
HMongo/__init__.py | dominon12/HMongo | 1 | 12769306 | <filename>HMongo/__init__.py
from HMongo.MongoDB import MongoDB
| 1.226563 | 1 |
catalogs/views.py | paulsuh/mwa2 | 155 | 12769307 | <filename>catalogs/views.py
"""
catalogs//views.py
"""
from django.http import HttpResponse
from catalogs.models import Catalog
import json
import logging
LOGGER = logging.getLogger('munkiwebadmin')
def catalog_view(request):
    '''Returns list of catalog names in JSON format'''
    # The request object is unused; the view always returns the full list.
    catalog_list = Catalog.list()
    LOGGER.debug("Got request for catalog names")
    return HttpResponse(json.dumps(catalog_list),
                        content_type='application/json')
def json_catalog_data(request):
    '''Returns complied and sorted catalog data in JSON format'''
    LOGGER.debug("Got request for catalog data")
    # Catalog.catalog_info() does the compilation/sorting; this view only
    # serializes it.
    return HttpResponse(json.dumps(Catalog.catalog_info()),
                        content_type='application/json')
def get_pkg_ref_count(request, pkg_path):
    '''Returns the number of pkginfo files referencing a given pkg_path'''
    # pkg_path comes from the URL route; the count is returned as a bare
    # JSON number.
    LOGGER.debug("Got request for pkg ref count for %s", pkg_path)
    return HttpResponse(json.dumps(Catalog.get_pkg_ref_count(pkg_path)),
                        content_type='application/json')
| 2.71875 | 3 |
hypatia/physics.py | defcon201/hypatia-engine | 251 | 12769308 | <filename>hypatia/physics.py<gh_stars>100-1000
"""Physical attributes of things.
Right now, not much differs it from the constants
module, but there will surely be much more to do
with physics as time progresses.
See Also:
:mod:`constants`
"""
import pygame
from hypatia import constants
class Velocity(object):
    """Eight-directional velocity."""

    def __init__(self, x=0, y=0):
        """Speed in pixels per second per axis. Values may be negative.

        Args:
            x (int): horizontal speed component (was documented as
                ``int|None``, but ``None`` is never used; default is 0).
            y (int): vertical speed component.

        """
        self.x = x
        self.y = y

    def __repr__(self):
        # Debug-friendly representation mirroring the constructor.
        return '%s(x=%r, y=%r)' % (type(self).__name__, self.x, self.y)
# this really isn't used, yet
class Position(object):
    """The position of an object.

    Scaffolding.

    """

    def __init__(self, x, y, size):
        """Extrapolate position info from supplied info.

        Args:
            x (int|float): how many pixels from the left of the scene.
            y (int|float): how many pixels from the top of the scene.
            size (tuple): (x, y) pixel dimensions of object being
                represented.

        """
        self.rect = pygame.Rect((x, y), size)
        # Full-precision coordinates, useful for sub-pixel movement.
        self.float = (float(x), float(y))
        # NOTE(review): stores x/y exactly as passed, so 'int' only holds
        # integers when the caller supplies them -- confirm intended.
        self.int = (x, y)
class AbsolutePosition(Position):
    """The absolute pixel coordinate in regard to the scene.

    Scaffolding.

    """

    # Placeholder subclass: currently identical to Position.
    pass
| 3.21875 | 3 |
PathPlanning/ClosedLoopRRTStar/closed_loop_rrt_star_car.py | MerdanBay/PythonRobotics | 1 | 12769309 | <gh_stars>1-10
"""
Path planning Sample Code with Closed loop RRT for car like robot.
author: AtsushiSakai(@Atsushi_twi)
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import pure_pursuit
import unicycle_model
sys.path.append(os.path.dirname(
os.path.abspath(__file__)) + "/../ReedsSheppPath/")
sys.path.append(os.path.dirname(
os.path.abspath(__file__)) + "/../RRTStarReedsShepp/")
try:
import reeds_shepp_path_planning
from rrt_star_reeds_shepp import RRTStarReedsShepp
except ImportError:
raise
show_animation = True
class ClosedLoopRRTStar(RRTStarReedsShepp):
    """
    Class for Closed loop RRT star planning
    """

    def __init__(self, start, goal, obstacle_list, rand_area,
                 max_iter=200,
                 connect_circle_dist=50.0,
                 robot_radius=0.0
                 ):
        super().__init__(start, goal, obstacle_list, rand_area,
                         max_iter=max_iter,
                         connect_circle_dist=connect_circle_dist,
                         robot_radius=robot_radius
                         )

        # Tracking/feasibility parameters for the closed-loop simulation.
        self.target_speed = 10.0 / 3.6  # [m/s] (10 km/h)
        self.yaw_th = np.deg2rad(3.0)  # goal heading tolerance [rad]
        self.xy_th = 0.5  # goal position tolerance
        self.invalid_travel_ratio = 5.0  # max simulated/planned travel ratio

    def planning(self, animation=True):
        """
        do planning

        animation: flag for animation on or off
        """
        # planning with RRTStarReedsShepp
        super().planning(animation=animation)

        # generate course candidates from every node near the goal
        path_indexs = self.get_goal_indexes()

        flag, x, y, yaw, v, t, a, d = self.search_best_feasible_path(
            path_indexs)

        return flag, x, y, yaw, v, t, a, d

    def search_best_feasible_path(self, path_indexs):
        """Simulate tracking each candidate path; keep the fastest feasible one."""
        print("Start search feasible path")

        best_time = float("inf")

        fx, fy, fyaw, fv, ft, fa, fd = None, None, None, None, None, None, None

        # pure pursuit tracking
        for ind in path_indexs:
            path = self.generate_final_course(ind)

            flag, x, y, yaw, v, t, a, d = self.check_tracking_path_is_feasible(
                path)

            # Prefer the candidate with the smallest simulated travel time.
            if flag and best_time >= t[-1]:
                print("feasible path is found")
                best_time = t[-1]
                fx, fy, fyaw, fv, ft, fa, fd = x, y, yaw, v, t, a, d

        print("best time is")
        print(best_time)

        if fx:
            fx.append(self.end.x)
            fy.append(self.end.y)
            fyaw.append(self.end.yaw)
            return True, fx, fy, fyaw, fv, ft, fa, fd

        return False, None, None, None, None, None, None, None

    def check_tracking_path_is_feasible(self, path):
        """Run a pure-pursuit closed-loop simulation along *path* and validate it."""
        # The planner's course is reversed so tracking starts at the start pose.
        cx = np.array([state[0] for state in path])[::-1]
        cy = np.array([state[1] for state in path])[::-1]
        cyaw = np.array([state[2] for state in path])[::-1]

        goal = [cx[-1], cy[-1], cyaw[-1]]

        cx, cy, cyaw = pure_pursuit.extend_path(cx, cy, cyaw)

        speed_profile = pure_pursuit.calc_speed_profile(
            cx, cy, cyaw, self.target_speed)

        t, x, y, yaw, v, a, d, find_goal = pure_pursuit.closed_loop_prediction(
            cx, cy, cyaw, speed_profile, goal)
        # Normalize headings to [-pi, pi].
        yaw = [reeds_shepp_path_planning.pi_2_pi(iyaw) for iyaw in yaw]

        if not find_goal:
            print("cannot reach goal")

        if abs(yaw[-1] - goal[2]) >= self.yaw_th * 10.0:
            print("final angle is bad")
            find_goal = False

        # Reject paths where the simulated travel greatly exceeds the
        # geometric length of the planned course.
        travel = unicycle_model.dt * sum(np.abs(v))
        origin_travel = sum(np.hypot(np.diff(cx), np.diff(cy)))

        if (travel / origin_travel) >= self.invalid_travel_ratio:
            print("path is too long")
            find_goal = False

        # Collision-check the simulated trajectory, not just the planned one.
        tmp_node = self.Node(x, y, 0)
        tmp_node.path_x = x
        tmp_node.path_y = y
        if not self.check_collision(
                tmp_node, self.obstacle_list, self.robot_radius):
            print("This path is collision")
            find_goal = False

        return find_goal, x, y, yaw, v, t, a, d

    def get_goal_indexes(self):
        """Return indexes of nodes within position and heading tolerance of the goal."""
        goalinds = []
        for (i, node) in enumerate(self.node_list):
            if self.calc_dist_to_goal(node.x, node.y) <= self.xy_th:
                goalinds.append(i)
        print("OK XY TH num is")
        print(len(goalinds))

        # angle check
        fgoalinds = []
        for i in goalinds:
            if abs(self.node_list[i].yaw - self.end.yaw) <= self.yaw_th:
                fgoalinds.append(i)
        print("OK YAW TH num is")
        print(len(fgoalinds))

        return fgoalinds
def main(gx=6.0, gy=7.0, gyaw=np.deg2rad(90.0), max_iter=100):
    """Plan a path to the goal pose (gx, gy, gyaw) among fixed obstacles and plot it."""
    print("Start" + __file__)
    # ====Search Path with RRT====
    obstacle_list = [
        (5, 5, 1),
        (4, 6, 1),
        (4, 8, 1),
        (4, 10, 1),
        (6, 5, 1),
        (7, 5, 1),
        (8, 6, 1),
        (8, 8, 1),
        (8, 10, 1)
    ]  # [x,y,size(radius)]

    # Set Initial parameters
    start = [0.0, 0.0, np.deg2rad(0.0)]
    goal = [gx, gy, gyaw]

    closed_loop_rrt_star = ClosedLoopRRTStar(start, goal,
                                             obstacle_list,
                                             [-2.0, 20.0],
                                             max_iter=max_iter)
    flag, x, y, yaw, v, t, a, d = closed_loop_rrt_star.planning(
        animation=show_animation)

    if not flag:
        print("cannot find feasible path")

    # Draw final path plus yaw/velocity/acceleration/steering profiles.
    if show_animation:
        closed_loop_rrt_star.draw_graph()
        plt.plot(x, y, '-r')
        plt.grid(True)
        plt.pause(0.001)

        plt.subplots(1)
        plt.plot(t, [np.rad2deg(iyaw) for iyaw in yaw[:-1]], '-r')
        plt.xlabel("time[s]")
        plt.ylabel("Yaw[deg]")
        plt.grid(True)

        plt.subplots(1)
        plt.plot(t, [iv * 3.6 for iv in v], '-r')
        plt.xlabel("time[s]")
        plt.ylabel("velocity[km/h]")
        plt.grid(True)

        plt.subplots(1)
        plt.plot(t, a, '-r')
        plt.xlabel("time[s]")
        plt.ylabel("accel[m/ss]")
        plt.grid(True)

        plt.subplots(1)
        plt.plot(t, [np.rad2deg(td) for td in d], '-r')
        plt.xlabel("time[s]")
        plt.ylabel("Steering angle[deg]")
        plt.grid(True)

        plt.show()
if __name__ == '__main__':
    # Run the demo with default goal and iteration count.
    main()
| 2.859375 | 3 |
argument-similarity/inference_no_topic_info.py | avouacr/acl2019-BERT-argument-classification-and-clustering | 0 | 12769310 | """Recompute BERT predictions on UKP dev/test without topic information."""
import os
import csv
import itertools
from tqdm import tqdm
from pytorch_pretrained_bert.tokenization import BertTokenizer
import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from train import InputExample, convert_examples_to_features
from SigmoidBERT import SigmoidBERT
def inference(bert_output, test_file, eval_batch_size=32):
    """Perform inference.

    Scores every unordered pair of distinct sentences found in *test_file*
    with the fine-tuned SigmoidBERT model stored in *bert_output*, then
    writes a TSV of (sentence_a, sentence_b, -1, logit) rows next to the
    model.  The gold label is written as -1 because pairs are generated,
    not taken from the file.
    """
    # Import fine-tuned BERT model
    max_seq_length = 64
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    tokenizer = BertTokenizer.from_pretrained(bert_output, do_lower_case=True)
    model = SigmoidBERT.from_pretrained(bert_output)
    model.to(device)
    model.eval()

    # Import test data: collect the unique sentences (first and last TSV
    # columns are discarded).
    test_sentences = set()
    with open(test_file, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter='\t', quotechar=None)
        for splits in csvreader:
            splits = map(str.strip, splits)
            __, sentence_a, sentence_b, __ = splits
            test_sentences.add(sentence_a)
            test_sentences.add(sentence_b)

    comb_iter = itertools.combinations(test_sentences, 2)
    input_examples = []
    output_examples = []
    for sentence_a, sentence_b in comb_iter:
        # label=-1 marks the gold label as unknown for generated pairs.
        input_examples.append(InputExample(text_a=sentence_a,
                                           text_b=sentence_b,
                                           label=-1))
        output_examples.append([sentence_a, sentence_b, -1])

    eval_features = convert_examples_to_features(input_examples, max_seq_length, tokenizer)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)

    # SequentialSampler keeps prediction order aligned with output_examples.
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=eval_batch_size)

    # Inference
    predicted_logits = []
    with torch.no_grad():
        for input_ids, input_mask, segment_ids in tqdm(eval_dataloader, desc="Batch"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)

            logits = model(input_ids, segment_ids, input_mask)
            logits = logits.detach().cpu().numpy()
            predicted_logits.extend(logits[:, 0])

    for idx, logit in enumerate(predicted_logits):
        output_examples[idx].append(logit)

    # Export results
    eval_mode = os.path.basename(test_file).split(".")[0]
    output_pred_file = os.path.join(bert_output,
                                    f"{eval_mode}_predictions_epoch_3_no_topic_info.tsv")
    with open(output_pred_file, "w") as writer:
        for idx, example in enumerate(output_examples):
            sentence_a, sentence_b, gold_label, pred_logit = example
            # Newlines/tabs inside sentences would break the TSV format.
            writer.write("\t".join([sentence_a.replace("\n", " ").replace("\t", " "),
                                    sentence_b.replace("\n", " ").replace("\t", " "),
                                    str(gold_label), str(pred_logit)]))
            writer.write("\n")
| 2.53125 | 3 |
venv/lib/python3.6/site-packages/ansible_collections/ansible/windows/tests/unit/plugins/filter/test_quote.py | usegalaxy-no/usegalaxy | 1 | 12769311 | # Copyright (c) 2020 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
import re
from ansible.errors import AnsibleFilterError
from ansible_collections.ansible.windows.plugins.filter.quote import quote
def test_invalid_shell_type():
    """An unrecognized shell name must raise AnsibleFilterError with a fixed message."""
    msg = "Invalid shell specified, valid shell are None, 'cmd', or 'powershell'"
    with pytest.raises(AnsibleFilterError, match=re.escape(msg)):
        quote('abc', shell='fake')
@pytest.mark.parametrize('value, expected', [
    # https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments?view=vs-2019
    (['a b c', 'd', 'e'], r'"a b c" d e'),
    (['ab"c', '\\', 'd'], r'"ab\"c" \ d'),
    ([r'a\\\b', 'de fg', 'h'], r'a\\\b "de fg" h'),
    ([r'a\\b c', 'd', 'e'], r'"a\\b c" d e'),
    # http://daviddeley.com/autohotkey/parameters/parameters.htm#WINCREATE
    ('CallMeIshmael', r'CallMeIshmael'),
    ('Call Me Ishmael', r'"Call Me Ishmael"'),
    ('CallMe"Ishmael', r'"CallMe\"Ishmael"'),
    ('Call Me Ishmael\\', r'"Call Me Ishmael\\"'),
    (r'CallMe\"Ishmael', r'"CallMe\\\"Ishmael"'),
    (r'a\\\b', r'a\\\b'),
    ('C:\\TEST A\\', r'"C:\TEST A\\"'),
    (r'"C:\TEST A\"', r'"\"C:\TEST A\\\""'),
    # Other tests
    (['C:\\Program Files\\file\\', 'arg with " quote'], r'"C:\Program Files\file\\" "arg with \" quote"'),
    ({'key': 'abc'}, r'key=abc'),
    ({'KEY2': 'a b c'}, r'KEY2="a b c"'),
    ({'Key3': r'a\\b c \" def "'}, r'Key3="a\\b c \\\" def \""'),
    ('{"a": ["b", "c' + "'" + ' d", "d\\"e"], "f": "g\\\\\\"g\\\\i\\""}',
     '"{\\"a\\": [\\"b\\", \\"c' + "'" + ' d\\", \\"d\\\\\\"e\\"], \\"f\\": \\"g\\\\\\\\\\\\\\"g\\\\i\\\\\\"\\"}"'),
    (None, '""'),
    ('', '""'),
    (['', None, ''], '"" "" ""'),
])
def test_quote_c(value, expected):
    """Default (C/argv) quoting rules produce the documented escaped form."""
    assert quote(value) == expected
@pytest.mark.parametrize('value, expected', [
    ('arg1', 'arg1'),
    (None, '""'),
    ('', '""'),
    ('arg1 and 2', '^"arg1 and 2^"'),
    ('malicious argument\\"&whoami', '^"malicious argument\\\\^"^&whoami^"'),
    ('C:\\temp\\some ^%file% > nul', '^"C:\\temp\\some ^^^%file^% ^> nul^"'),
])
def test_quote_cmd(value, expected):
    """cmd.exe quoting escapes shell metacharacters with carets."""
    assert quote(value, shell='cmd') == expected
@pytest.mark.parametrize('value, expected', [
    ('arg1', "'arg1'"),
    (None, "''"),
    ('', "''"),
    ('Double " quotes', "'Double \" quotes'"),
    ("Single ' quotes", "'Single '' quotes'"),
    ("'Multiple '''' single '' quotes '", "'''Multiple '''''''' single '''' quotes '''"),
    (u"a'b\u2018c\u2019d\u201ae\u201bf", u"'a''b\u2018\u2018c\u2019\u2019d\u201a\u201ae\u201b\u201bf'")
])
def test_quote_powershell(value, expected):
    """PowerShell quoting doubles single quotes (including Unicode quote variants)."""
    assert quote(value, shell='powershell') == expected
| 2.015625 | 2 |
model.py | cvvsu/maskNPF | 0 | 12769312 | import os, glob
import numpy as np
import pandas as pd
from multiprocessing import Pool
from PIL import Image
from tqdm import tqdm
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
import warnings
warnings.filterwarnings("ignore")
import torch
from torchvision import transforms
from utils import get_next_day, mkdirs, psd2im
from utils import get_instance_segmentation_model
from utils import reshape_mask
from utils import get_GR, get_SE
class NPFDetection(object):
    """Class for NPF detection.

    Pipeline wrapper: renders particle-size-distribution surface plots to
    images, runs a pre-trained Mask R-CNN to detect NPF event masks, lets a
    user pick the correct mask interactively with tkinter, and finally
    computes event start/end times and growth rates (GR).
    """
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # Use roughly half the available cores for multiprocessing pools.
        self.cpu_count = os.cpu_count() // 2 + 1
        self.dataroot = os.path.join(opt.dataroot, opt.station)
        self.station = opt.station
        # dynamic_vmax -> each image picks its own color-scale maximum (vmax=None).
        self.vmax = None if opt.dynamic_vmax else opt.vmax
        self.tm_res = opt.time_res
        # Station data: first column is the datetime index.
        self.df = pd.read_csv(os.path.join(self.dataroot, self.station+'.csv'), parse_dates=[0], index_col=0)
        self.days = sorted(np.unique(self.df.index.date.astype(str)).tolist())
        print(f'There are {len(self.days)} days of data to be processed.')
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # Index of the day currently shown in the tkinter mask browser.
        self.key_index = 0
    def draw_one_day_images(self):
        """Draw NPF images with one-day unit"""
        self.savefp = os.path.join(self.dataroot, 'images', 'one_day')
        mkdirs(self.savefp)
        self.dimg = 1
        # Parallelize only when enough cores are available.
        if self.cpu_count >= 8:
            with Pool(self.cpu_count) as p:
                p.map(self.draw_image, self.days)
        else:
            for day in tqdm(self.days):
                self.draw_image(day)
    def draw_two_day_images(self):
        """Draw NPF images with two-day unit"""
        self.savefp = os.path.join(self.dataroot, 'images', 'two_day')
        mkdirs(self.savefp)
        self.dimg = 2
        if self.cpu_count >= 8:
            with Pool(self.cpu_count) as p:
                p.map(self.draw_image, self.days)
        else:
            for day in tqdm(self.days):
                self.draw_image(day)
    def draw_image(self, day):
        """Draw an NPF image (skips days whose PNG already exists)."""
        if self.dimg == 1:
            if not os.path.exists(os.path.join(self.savefp, day+'.png')):
                try:
                    psd2im(self.df.loc[day], use_xaxis=False, use_yaxis=False, vmax=self.vmax, savefp=self.savefp, show_figure=False)
                except Exception:
                    print(f'Cannot draw the NPF image for current day {day}.')
        elif self.dimg == 2:
            # Two-day image pairs the day with the next available day.
            day_ = get_next_day(day)
            if day_ in self.days and not os.path.exists(os.path.join(self.savefp, day+'_'+day_+'.png')):
                try:
                    psd2im(self.df.loc[day:day_], use_xaxis=False, use_yaxis=False, vmax=self.vmax, savefp=self.savefp, show_figure=False)
                except Exception:
                    print(f'Cannot draw the NPF image for current day {day}_{day_}.')
    def detect_one_day_masks(self):
        """Detect masks for one-day NPF images"""
        self.load_model()
        size = (self.opt.im_size, self.opt.im_size)
        res = {}
        for im_path in glob.glob(os.path.join(self.dataroot, 'images/one_day')+'/*.png'):
            mask = self.detect_mask(im_path, size)
            if mask is not None:
                res.update(mask)
        print(f'Detected {len(res)} one-day masks whose scores are higher than {self.opt.scores:.2f}.')
        savefp = os.path.join(self.dataroot, 'masks')
        mkdirs(savefp)
        np.save(os.path.join(savefp, 'one_day.npy'), res)
    def detect_two_day_masks(self):
        """Detect masks for two-day NPF images"""
        self.load_model()
        # Two-day images are twice as wide as one-day images.
        size = (self.opt.im_size*2, self.opt.im_size)
        res = {}
        for im_path in glob.glob(os.path.join(self.dataroot, 'images/two_day')+'/*.png'):
            mask = self.detect_mask(im_path, size)
            if mask is not None:
                res.update(mask)
        print(f'Detected {len(res)} two-day masks whose scores are higher than {self.opt.scores:.2f}.')
        savefp = os.path.join(self.dataroot, 'masks')
        mkdirs(savefp)
        np.save(os.path.join(savefp, 'two_day.npy'), res)
    def load_model(self):
        # load the pre-trained Mask R-CNN model
        self.model = get_instance_segmentation_model()
        self.model.load_state_dict(torch.load(f'{self.opt.ckpt_dir}/{self.opt.model_name}'))
        self.model.to(self.device)
        self.model.eval()
    @torch.no_grad()
    def detect_mask(self, im_path, size):
        """Detect valid masks for NPF images.

        Returns {day: masks} for detections scoring at least opt.scores,
        or None when nothing qualifies.
        """
        # get mask
        im = Image.open(im_path).convert('RGB').resize(size, Image.ANTIALIAS)
        ts = transforms.ToTensor()(im)
        out = self.model([ts.to(self.device)])[0]
        if len(out['scores']) == 0:
            return None
        else:
            # Keep only detections above the confidence threshold.
            idx_bool = out['scores'].cpu().numpy() >= self.opt.scores
            index = [i for i, item in enumerate(idx_bool) if item]
            if len(index) == 0:
                return None
            else:
                # Binarize soft masks at opt.mask_thres.
                masks = out['masks'][index].squeeze(1).cpu().numpy() >= self.opt.mask_thres
                day = im_path.split(os.sep)[-1].split('.')[0].split('_')[0]
                return {day: masks}
    def visualize_masks(self):
        """Open a tkinter browser to review detected masks day by day."""
        self.masks_oneday = np.load(os.path.join(self.dataroot, 'masks', 'one_day.npy'), allow_pickle=True).tolist()
        self.masks_twoday = np.load(os.path.join(self.dataroot, 'masks', 'two_day.npy'), allow_pickle=True).tolist()
        self.keys = sorted(list(self.masks_oneday.keys()))
        self.keys_ = sorted(list(self.masks_twoday.keys()))
        self.len_keys = len(self.keys)
        self.win = tk.Tk()
        self.win.title('NPF Detection')
        # Top canvas: one-day image + candidate masks.
        self.fig = Figure(dpi=100)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.win)
        graph_widget = self.canvas.get_tk_widget()
        graph_widget.grid(row=0, column=0, rowspan=2, columnspan=4, ipadx=200, sticky = tk.NW)
        # Bottom canvas: two-day image + candidate masks.
        self.fig1 = Figure(dpi=100)
        self.canvas1 = FigureCanvasTkAgg(self.fig1, master=self.win)
        graph_widget1 = self.canvas1.get_tk_widget()
        graph_widget1.grid(row=2, column=0, rowspan=2, columnspan=4, ipadx=200, sticky = tk.NW)
        tk.Label(self.win, text='Select the one-day mask (select only one mask currently)').grid(row=0, column=5, columnspan=5, ipadx=50)
        tk.Label(self.win, text='Select the two-day mask (select only one mask currently)').grid(row=2, column=5, columnspan=5, ipadx=50)
        self.plot_next()
        tk.Button(self.win,text="Prev",command=self.plot_prev).grid(row=5,column=3, columnspan=5, sticky=tk.W)
        tk.Button(self.win,text="Next",command=self.plot_next).grid(row=5,column=7, columnspan=5, sticky=tk.W)
        self.win.mainloop()
    def plot(self):
        """Redraw both figures for the day at self.key_index."""
        self.fig.clear()
        self.fig1.clear()
        self.key = self.keys[self.key_index]
        self.visualize_oneday_mask(self.fig, self.key)
        # Not every one-day key has a matching two-day mask.
        if self.key in self.keys_:
            self.visualize_twoday_mask(self.fig1, self.key)
        self.canvas.draw_idle()
        self.canvas1.draw_idle()
    def plot_prev(self):
        # NOTE(review): plots first, then decrements — key_index can go
        # negative, which wraps to the end of the list on the next plot().
        self.plot()
        self.key_index -= 1
        tk.Label(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue').grid(row=4, column=7, ipadx=50)
        if self.key_index < 0:
            tk.messagebox.showerror(title='Warning', message='You are at the begining, please click the Next button.')
    def plot_next(self):
        self.plot()
        self.key_index += 1
        tk.Label(self.win, text=f'{self.key_index}/{self.len_keys}', fg='blue').grid(row=4, column=7, ipadx=50)
        if self.key_index == self.len_keys - 1:
            tk.messagebox.showinfo(title='Warning', message='Good job! All masks have been checked!')
    def visualize_oneday_mask(self, fig, day):
        """Draw the one-day image plus each candidate mask, with checkboxes."""
        masks = self.masks_oneday[day]
        num_masks = masks.shape[0]
        ax = fig.add_subplot(1, num_masks+1, 1)
        im = Image.open(os.path.join(self.dataroot, 'images/one_day', day+'.png'))
        im = im.resize((self.opt.im_size, self.opt.im_size), Image.ANTIALIAS)
        ax.imshow(np.array(im))
        ax.set_title(day)
        ax.axis('off')
        # plot masks
        for i in range(masks.shape[0]):
            ax = fig.add_subplot(1, num_masks+1, i+2)
            ax.imshow(masks[i], cmap='gray')
            ax.set_title(f'mask {i}')
            ax.axis('off')
        # Checkbuttons are always created for 5 masks regardless of num_masks.
        for i in range(5):
            ck_btn = tk.Checkbutton(self.win, text=f'one-day mask {i}')
            ck_btn.grid(row=1, column=5+i, ipadx=10, ipady=5)
            ck_btn.config(command=lambda btn=ck_btn:self.save_mask(btn))
    def visualize_twoday_mask(self, fig, day):
        """Draw the two-day image plus each candidate mask, with checkboxes."""
        day_ = get_next_day(day)
        masks_ = self.masks_twoday[day]
        num_masks = masks_.shape[0]
        ax = fig.add_subplot(1, num_masks+1, 1)
        im_ = Image.open(os.path.join(self.dataroot, 'images/two_day', day+'_'+day_+'.png'))
        im_ = im_.resize((self.opt.im_size*2, self.opt.im_size), Image.ANTIALIAS)
        ax.imshow(np.array(im_))
        ax.set_title(day+'_'+day_)
        ax.axis('off')
        for i in range(masks_.shape[0]):
            ax = fig.add_subplot(1, num_masks+1, i+2)
            ax.imshow(masks_[i], cmap='gray')
            ax.set_title(f'mask {i}')
            ax.axis('off')
        for i in range(5):
            ck_btn_ = tk.Checkbutton(self.win, text=f'two-day mask {i}')
            ck_btn_.grid(row=3, column=5+i, ipadx=10, ipady=5)
            ck_btn_.config(command=lambda btn=ck_btn_:self.save_mask(btn))
    def save_mask(self, btn):
        """Persist the mask whose checkbutton was toggled as <day>.npy."""
        text = btn.cget('text')
        # Mask index is the last character of the checkbutton label.
        idx = int(text[-1])
        if 'one-day' in text:
            savefp = os.path.join(self.dataroot, 'masks/one_day')
            mkdirs(savefp)
            np.save(os.path.join(savefp, f'{self.key}.npy'), self.masks_oneday[self.key][idx])
        elif 'two-day' in text:
            savefp = os.path.join(self.dataroot, 'masks/two_day')
            mkdirs(savefp)
            np.save(os.path.join(savefp, f'{self.key}.npy'), self.masks_twoday[self.key][idx])
    def get_SE_GR(self, day):
        """Compute start/end times and growth rates for one day and save a CSV."""
        df = self.df.loc[day]
        mask = np.load(os.path.join(self.dataroot, 'masks/one_day', day+'.npy'), allow_pickle=True)
        mask = reshape_mask(mask, df.shape)
        try:
            st, et = get_SE(df, mask)
            gr_dict = get_GR(df, mask, self.tm_res, savefp=self.savefp, vmax=self.vmax)
        except:
            # NOTE(review): bare except silently skips days where SE/GR fail.
            # print(day)
            return
        try:
            mask_ = np.load(os.path.join(self.dataroot, 'masks/two_day', day+'.npy'), allow_pickle=True)
            df_ = self.df.loc[day:get_next_day(day)]
            mask_ = reshape_mask(mask_, df_.shape)
            st_two, et_two = get_SE(df_, mask_)
        except:
            # Fall back to one-day times when no two-day mask exists.
            st_two, et_two = st, et
        save_dict = {**{
            'date': [day],
            'start_time_one': [st],
            'end_time_one': [et],
            'start_time_two': [st_two],
            'end_time_two': [et_two]
        }, **gr_dict}
        pd.DataFrame(save_dict).to_csv(os.path.join(self.savefp, f'{day}.csv'), index=False)
    def save_SE_GR(self):
        r"""
        obtain and save the start time, end time and the growth rates.
        """
        files = sorted(glob.glob(os.path.join(self.dataroot, 'masks/one_day')+'/*.npy'))
        days = [file.split(os.sep)[-1].split('.')[0] for file in files]
        print(f'Calculating growth rates for {len(days)} days.')
        self.savefp = os.path.join(self.dataroot, 'GR')
        mkdirs(self.savefp)
        if self.cpu_count >= 8:
            with Pool(self.cpu_count) as p:
                p.map(self.get_SE_GR, days)
        else:
            for day in tqdm(days):
                self.get_SE_GR(day)
| 2.171875 | 2 |
mom/model.py | Sebastiencreoff/mongo_tool | 0 | 12769313 | #! /usr/bin/env python
import functools
import uuid
from . import session
from . import utils
class Model(object):
    """Base class for session-persisted objects.

    Attribute writes are intercepted by ``__setattr__``: setting ``done``
    registers the object with the shared session (unless it was constructed
    read-only from existing data); any later attribute change pushes an
    update. Deleting the instance removes it from the session.
    """
    # Session shared by every Model instance (class-level attribute).
    session = session.Session()
    def __init__(self, data=None):
        self._id = str(uuid.uuid1())
        self.in_progress = False
        self.read_only = False
        self.update = False
        if data:
            # Rehydrate from an existing record; do not re-add to the session.
            self.read_only = True
            for k, v in utils.class_from_dict(
                    type(self).__name__, data,
                    self.JSON_SCHEMA,
                    self.EXCLUDED_KEYS).items():
                setattr(self, k, v)
        # Marks construction complete and triggers session.add (see __setattr__).
        self.done = True
    def __setattr__(self, key, value):
        self.__dict__[key] = value
        if key == 'done':
            if not self.read_only:
                Model.session.add(self)
        elif hasattr(self, 'done') and not self.in_progress:
            # Any post-construction write flags the object as updated.
            self.__dict__['update'] = True
            Model.session.update(self)
    def __del__(self):
        Model.session.delete(self)
    def id(self):
        """Return the unique identifier assigned at construction."""
        return self._id
    def to_dict(self):
        """Serialize the base fields; subclasses extend this."""
        return {
            '_id': self._id
        }
    def with_update(f):
        # Decorator for subclass methods that mutate many attributes:
        # suppresses per-attribute session updates while f runs, then
        # pushes a single update afterwards.
        @functools.wraps(f)
        def wrapped(inst, *args, **kwargs):
            inst.__dict__['in_progress'] = True
            result = f(inst, *args, **kwargs)
            inst.__dict__['in_progress'] = False
            inst.__dict__['update'] = True
            Model.session.update(inst)
            return result
        return wrapped
| 2.3125 | 2 |
python/cumulus/cumulus/snodas/core/config.py | USACE/cumulus-geolambda | 2 | 12769314 | # Application Install Directory (Relative to Inside Container)
SNODAS_APP = '/app/cumulus/snodas/core'
# Archive of UNMASKED files downloaded from NSIDC
SNODAS_RAW_UNMASKED = '/app/data/snodas/raw_unmasked'
# Archive of MASKED files downloaded from NSIDC
SNODAS_RAW_MASKED = '/app/data/snodas/raw_masked'
| 0.964844 | 1 |
blog/urls.py | flaab/pz-django-blog | 5 | 12769315 | <reponame>flaab/pz-django-blog
from django.conf.urls import url
from django.urls import path
from .feeds import LatestPostFeed
from . import views
# App name
app_name = 'blog'
# Url Patterns
urlpatterns = [
# Default lists all posts
url('^$', views.post_list, name='post_list'),
# List post by category
path('category/<slug:category_slug>/', views.post_list, name = 'post_list_by_category'),
# List post by tag
path('tag/<slug:tag_slug>/', views.post_list, name = 'post_list_by_tag'),
# List post by author
path('author/<int:author_id>/', views.post_list, name = 'post_list_by_author'),
# Read a post
path('<int:year>/<int:month>/<int:day>/<slug:post>/', views.post_detail, name = 'post_detail'),
# Discussion
path('discussion/<int:year>/<int:month>/<int:day>/<slug:post>/', views.post_discussion, name = 'post_discussion'),
# Flatpage
path('flatpage/<slug:slug>/', views.flatpage, name = 'flatpage'),
# Search
path('search/', views.post_search, name = 'post_search'),
# RSS Feed
path('feed/', LatestPostFeed(), name = 'post_feed')
]
| 2.140625 | 2 |
nk_ape/__init__.py | NewKnowledge/ape | 0 | 12769316 | <filename>nk_ape/__init__.py
import sys
import logging
from .ape import Ape
from .config import ONTOLOGY_PATH, EMBEDDING_PATH, LOG_LEVEL
# Configure package-wide logging at import time, emitting to stdout.
logging.basicConfig(
    level=LOG_LEVEL,
    stream=sys.stdout,
)
| 1.796875 | 2 |
WRF_snapshots.py | NCAR/HWT_mode | 1 | 12769317 | """
Copied from WRF_SPC.py Sep 20, 2019.
Given a model initialization time and a valid time, plot crefuh around hagelslag objects.
"""
import argparse
import datetime
import pdb
import os
import sys
import pandas as pd
import numpy as np
import fieldinfo # levels and color tables - Adapted from /glade/u/home/wrfrt/wwe/python_scripts/fieldinfo.py 20190125.
from wrf import to_np, getvar, get_cartopy, latlon_coords
from metpy.units import units
from netCDF4 import Dataset
import cartopy
import matplotlib
matplotlib.use("Agg") # allows dav slurm jobs
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# =============Arguments===================
parser = argparse.ArgumentParser(description = "Plot WRF and SPC storm reports",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-f", "--fill", type=str, default= 'crefuh', help='netCDF variable name for contour fill field')
parser.add_argument("-b", "--barb", choices=["shr06", "wind10m",""], type=str, default="wind10m", help='wind barbs')
parser.add_argument("-c", "--contour", type=str, default=None, help='contour field')
parser.add_argument("-o", "--outdir", type=str, default='.', help="name of output path")
parser.add_argument("-p", "--padding", type=float, nargs=4, help="padding on west, east, south and north side in km",
        default=[175.,175.,175.,175.])
parser.add_argument("--timeshift", type=int, default=0, help="hours to shift background field")
parser.add_argument("--arrow", action='store_true', help="Add storm motion vector from hagelslag")
parser.add_argument("--no-fineprint", action='store_true', help="Don't write image details at bottom")
parser.add_argument("--force_new", action='store_true', help="overwrite any old outfile, if it exists")
parser.add_argument("--no-counties", action='store_true', help="Don't draw county borders (can be slow)")
parser.add_argument("--no-mask", action='store_true', help="Don't draw object mask")
parser.add_argument('-i', "--idir", type=str, default="/glade/p/mmm/parc/sobash/NSC/3KM_WRF_POST_12sec_ts",
        help="path to WRF output files")
parser.add_argument('-s', "--stride", type=int, default=1, help="plot every stride points. speed things up with stride>1")
parser.add_argument('-t', "--trackdir", type=str, default="/glade/scratch/ahijevyc/track_data_ncarstorm_3km_REFL_1KM_AGL_csv",
        help="path to hagelslag track-step files")
parser.add_argument("--patchdir", type=str, default="/glade/scratch/ahijevyc/track_data_ncarstorm_3km_REFL_1KM_AGL_nc",
        help="path to hagelslag netCDF patches")
parser.add_argument("initial_time", type=lambda d: datetime.datetime.strptime(d, '%Y%m%d%H'),
        help="model initialization date and hour, yyyymmddhh")
parser.add_argument("valid_time", type=lambda d: datetime.datetime.strptime(d, '%Y%m%d%H'),
        help="model valid date and hour, yyyymmddhh")
parser.add_argument("-d", "--debug", action='store_true')
# Assign arguments to simply-named variables
args = parser.parse_args()
barb = args.barb
contour = args.contour
fill = args.fill
odir = args.outdir
padding = args.padding
timeshift = args.timeshift
arrow = args.arrow
no_fineprint = args.no_fineprint
force_new = args.force_new
no_counties = args.no_counties
no_mask = args.no_mask
idir = args.idir
stride = args.stride
patchdir = args.patchdir
trackdir = args.trackdir
initial_time = args.initial_time
valid_time = args.valid_time
debug = args.debug
if debug:
    print(args)
# Derive lead time and make sure it is between 7 and 36 hours (warn only;
# the exit is commented out so out-of-range lead times still run).
lead_time = valid_time - initial_time
if lead_time < datetime.timedelta(hours=7) or lead_time > datetime.timedelta(hours=36):
    print("lead_time:",lead_time, "not between 7 and 36 hours")
    #sys.exit(1)
def update_scale_labels(scale_xy):
    """Refresh the two axis-length annotations with the current extent in km.

    Cartopy axes have no set_xlabel()/set_ylabel(), so the labels live in
    the two annotation objects passed in as (x_annotation, y_annotation).
    Relies on the module-level cartopy axes ``ax``.
    """
    x_annot, y_annot = scale_xy
    x0, x1 = ax.get_xlim()
    y0, y1 = ax.get_ylim()
    x_annot.set_text("%dkm" % round((x1 - x0) / 1000.))
    y_annot.set_text("%dkm" % round((y1 - y0) / 1000.))
# Read hagelslag track_step csv file into pandas DataFrame.
mysterious_suffix = '' # '_13' or '_12'
tracks = trackdir + '/' + initial_time.strftime('track_step_NCARSTORM_d01_%Y%m%d-%H%M')+mysterious_suffix+'.csv'
if debug:
    print("reading csv file",tracks)
df = pd.read_csv(tracks, parse_dates=['Run_Date', 'Valid_Date'])
# Throw out everything except requested valid times.
df = df[df.Valid_Date == valid_time]
if df.empty:
    print("csv track step file", tracks, " has no objects at requested valid time",valid_time,". That is probably fine.")
    sys.exit(0)
# Throw out weak UH objects (updraft helicity below good_UH m2/s2 in magnitude).
good_UH = 25
igood_UH = df['UP_HELI_MAX_max'] >= good_UH
if 'UP_HELI_MIN_min' in df.columns:
    # Also keep strong anticyclonic (negative-UH) objects when the column exists.
    igood_UH = igood_UH | (df['UP_HELI_MIN_min'].abs() >= good_UH)
print("ignoring",(~igood_UH).sum(),"object with abs(UH) <",good_UH)
if debug:
    if 'UP_HELI_MIN_min' in df.columns:
        print(df[~igood_UH][["Step_ID","UP_HELI_MAX_max","UP_HELI_MIN_min"]])
    else:
        print(df[~igood_UH][["Step_ID","UP_HELI_MAX_max"]])
df = df[igood_UH]
if df.empty:
    print("csv track step file", tracks, " has no good UH objects at requested valid time",valid_time,". That is probably fine.")
    sys.exit(0)
# List of all png files that will be created (one per object/track step).
pngfiles = odir + '/' + df.Step_ID + "_" + "{:+1.0f}".format(timeshift) + ".png"
if all([os.path.isfile(p) for p in pngfiles]) and not force_new:
    # Exit if pngs all already exist and force_new option was not used.
    print(initial_time, valid_time, "{:+1.0f}".format(timeshift) +"h",fill,"finished. Moving on.")
    sys.exit(0)
if not no_mask:
    # Read netCDF patches with the object masks hagelslag extracted.
    patches = patchdir + '/' + initial_time.strftime('NCARSTORM_%Y%m%d-%H%M_d01_model_patches.nc')
    pnc = Dataset(patches,'r')
    masks = pnc.variables["masks"][:]
    mlons = pnc.variables["lon"][:]
    mlats = pnc.variables["lat"][:]
    mtrack_ids = pnc.variables["track_id"][:]
    mtrack_steps = pnc.variables["track_step"][:]
    mask_centroid_lats = pnc.variables["centroid_lat"][:]
    mask_centroid_lons = pnc.variables["centroid_lon"][:]
    pnc.close()
# Get color map, levels, and netCDF variable name appropriate for requested variable (from fieldinfo module).
info = fieldinfo.nsc[fill]
if debug:
    print("found nsc in fieldinfo.py. Using",info)
cmap = colors.ListedColormap(info['cmap'])
levels = info['levels']
fill = info['fname'][0]
# Get wrfout filename; timeshift lets the background come from a different hour.
history_time = valid_time + datetime.timedelta(hours=timeshift)
wrfout = idir + '/' + initial_time.strftime('%Y%m%d%H') + '/' + history_time.strftime('diags_d01_%Y-%m-%d_%H_%M_%S.nc')
if debug: print("About to open "+wrfout)
wrfnc = Dataset(wrfout,"r")
if fill not in wrfnc.variables:
    print("variable "+ fill + " not found")
    print("choices:", wrfnc.variables.keys())
    sys.exit(1)
# Get a 2D var from wrfout file. It has projection info.
if debug:
    print("getvar...")
cvar = getvar(wrfnc,fill)
wrflat, wrflon = latlon_coords(cvar)
# get cartopy mapping object
if debug: print("get_cartopy...")
WRF_proj = get_cartopy(cvar)
# fineprint0 accumulates a description of everything drawn on the image.
fineprint0 = 'fill '+fill+" ("+cvar.units+") "
if 'units' in info.keys():
    cvar.metpy.convert_units(info['units'])
if hasattr(cvar, 'long_name'):
    label = cvar.long_name
elif hasattr(cvar, 'description'):
    label = cvar.description
# convert WRF lat/lons to x,y
pts = WRF_proj.transform_points(cartopy.crs.PlateCarree(), to_np(wrflon[::stride,::stride]), to_np(wrflat[::stride,::stride])) # Transform lon/lat to x and y (in meters) in WRF projection.
x, y, z = pts[:,:,0], pts[:,:,1], pts[:,:,2]
fig = plt.figure(figsize=(10,10))
if debug: print("plt.axes()")
ax = plt.axes(projection=WRF_proj)
ax.add_feature(cartopy.feature.STATES.with_scale('10m'), linewidth=0.35, alpha=0.55)
# Set title (month and hour)
ax.set_title(history_time.strftime("%b %HZ"))
# Empty fineprint placeholder in lower left corner of image.
fineprint_obj = plt.annotate(text=fineprint0, xy=(0,5), xycoords=('axes fraction', 'figure pixels'), va="bottom", fontsize=4)
if cvar.min() > levels[-1] or cvar.max() < levels[0]:
    print('levels',levels,'out of range of cvar', cvar.values.min(), cvar.values.max())
    sys.exit(1)
if debug:
    print('levels:',levels, 'cmap:', cmap.colors)
if debug:
    print("plotting filled contour",cvar.name,"...")
cfill = ax.contourf(x, y, to_np(cvar[::stride,::stride]), levels=levels, cmap=cmap)
# Color bar
cb = plt.colorbar(cfill, ax=ax, format='%.0f', shrink=0.52, orientation='horizontal')
if hasattr(cvar,"units"):
    cb.set_label(label+" ("+cvar.units+")", fontsize="small")
if len(levels) < 10:
    # label every level if there is room.
    cb.set_ticks(levels)
cb.ax.tick_params(labelsize='xx-small')
cb.outline.set_linewidth(0.5)
# Create 2 annotation object placeholders for spatial scale. Will be updated with each set_extent().
scale_kw = {"ha":"center","rotation_mode":"anchor","xycoords":"axes fraction","textcoords":"offset points"}
scale_xy = ( ax.annotate("", (0.5, 0), xytext=(0,-5), va='top', rotation='horizontal', **scale_kw),
             ax.annotate("", (0, 0.5), xytext=(-5,0), va='bottom', rotation='vertical', **scale_kw) )
# Special case of composite reflectivity, UH overlay
if args.fill == 'crefuh':
    # fname[1]/fname[2] hold the max/min updraft-helicity variable names.
    max_uh = getvar(wrfnc,info['fname'][1])
    min_uh = getvar(wrfnc,info['fname'][2])
    max_uh_threshold = info['max_threshold']
    min_uh_threshold = info['min_threshold']
    print("UH max:", max_uh.max().values)
    print("UH min:", min_uh.min().values)
    if max_uh.max() > max_uh_threshold:
        print("Filled contour UH >",max_uh_threshold)
        # Don't use contourf if the data fall outside the levels range. You will get ValueError: 'bboxes' cannot be empty.
        # See https://github.com/SciTools/cartopy/issues/1290
        cs1 = ax.contourf(x, y, to_np(max_uh), levels=[max_uh_threshold,1000], colors='black',
                alpha=0.3 )
        if debug: print("solid contour UH >",max_uh_threshold)
        cs2 = ax.contour(x, y, to_np(max_uh), levels=max_uh_threshold*np.arange(1,6), colors='black',
                linestyles='solid', linewidths=0.4 )
        fineprint0 += "UH>"+str(max_uh_threshold) +" "+ max_uh.units + " "
        # Oddly, the zero contour is plotted if there are no other valid contours
        if 0.0 in cs2.levels:
            print("uh has zero contour for some reason. Hide it")
            if debug:
                pdb.set_trace()
            for i in cs2.collections: i.remove()
    if min_uh.min() < min_uh_threshold:
        print("Filled UH contour <",min_uh_threshold)
        # Don't use contourf if the data fall outside the levels range. You will get ValueError: 'bboxes' cannot be empty.
        # See https://github.com/SciTools/cartopy/issues/1290
        negUH1 = ax.contourf(x, y, to_np(min_uh), levels=[-1000, min_uh_threshold], colors='black',
                alpha=0.3 )
        if debug: print("dashed contour UH <",min_uh_threshold)
        negUH2 = ax.contour(x, y, to_np(min_uh), levels=min_uh_threshold*np.arange(6,0,-1), colors='black',
                linestyles='dashed', linewidths=0.4 )
        fineprint0 += "UH<"+str(-min_uh_threshold) +" "+ min_uh.units + " "
        if 0.0 in negUH2.levels:
            print("neg uh has a zero contour. Hide it")
            if debug:
                pdb.set_trace()
            for i in negUH2.collections: i.remove()
# Read my own county shape file.
if not no_counties:
    if debug:
        print("About to draw counties")
    reader = cartopy.io.shapereader.Reader('/glade/work/ahijevyc/share/shapeFiles/cb_2013_us_county_500k/cb_2013_us_county_500k.shp')
    counties = list(reader.geometries())
    # Create custom cartopy feature that can be added to the axes.
    COUNTIES = cartopy.feature.ShapelyFeature(counties, cartopy.crs.PlateCarree())
    ax.add_feature(COUNTIES, facecolor="none", edgecolor='black', alpha=0.25, linewidth=0.2)
if barb:
    # Get barb netCDF variable name appropriate for requested variable (from fieldinfo module).
    info = fieldinfo.nsc[barb]
    if debug:
        print("found nsc in fieldinfo.py. Using",info)
    if args.barb == 'wind10m': u,v = getvar(wrfnc, 'uvmet10', units='kt')
    if args.barb == 'shr06':
        # 1.93 converts m/s to knots (approximately); TODO confirm constant.
        u = getvar(wrfnc, 'USHR6')*1.93
        v = getvar(wrfnc, 'VSHR6')*1.93
        u.attrs['units'] = 'kt'
        v.attrs['units'] = 'kt'
    # Density of barbs stays the same, no matter the domain size (padding)
    # larger domain = greater stride
    skip = int(round(np.max([(padding[0]+padding[1]), (padding[2]+padding[3])])/50))
    if args.fill == 'crefuh': alpha=0.6
    else: alpha=1.0
    if debug: print("plotBarbs: starting barbs")
    # barbs already oriented with map projection. In Basemap, we needed to use m.rotate_vector().
    cs2 = ax.barbs(x[::skip*stride,::skip*stride], y[::skip*stride,::skip*stride],
            to_np(u)[::skip*stride,::skip*stride], to_np(v)[::skip*stride,::skip*stride], color='black',
            alpha=alpha, length=5, linewidth=0.25, sizes={'emptybarb':0.05} )
    fineprint0 += "wind barb (" + u.units + ") "
if contour:
    # Get netCDF variable name appropriate for requested variable from fieldinfo module.
    info = fieldinfo.nsc[contour]
    if debug:
        print("found nsc in fieldinfo.py. Using",info)
    cvar = getvar(wrfnc, info['fname'][0])
    if 'units' in info.keys():
        cvar.metpy.convert_units(info['units'])
    levels = info['levels']
    # could use levels from fieldinfo module, but default is often less cluttered.
    alpha=0.4
    if debug: print("starting "+contour+" contours")
    cr = ax.contour(x[::stride,::stride], y[::stride,::stride],
            cvar[::stride,::stride], levels=levels, colors='black', alpha=alpha,
            linewidths=0.75)
    clab = ax.clabel(cr, inline=False, fmt='%.0f', fontsize=6)
    fineprint0 += "contour "+contour+" (" + cvar.units + ") "
# One image per storm object: zoom to the object centroid, draw its mask
# (and optional motion vector), then save a PNG named after the step ID.
for lon,lat,stepid,trackid,u,v,pngfile in zip(df.Centroid_Lon, df.Centroid_Lat,df.Step_ID,df.Track_ID,df.Storm_Motion_U,df.Storm_Motion_V,pngfiles):
    fineprint = fineprint0 + "\nwrfout " + os.path.realpath(wrfout)
    if not no_mask:
        fineprint += "\npatches "+patches
    fineprint += "\ntracks "+tracks
    fineprint += "\ntrackid "+trackid
    fineprint += "\ncreated "+str(datetime.datetime.now(tz=None)).split('.')[0]
    if not no_fineprint: # show fineprint
        fineprint_obj.set_text(fineprint)
    x, y = WRF_proj.transform_point(lon, lat, cartopy.crs.PlateCarree()) # Transform lon/lat to x and y (in meters) in WRF projection.
    # Zoom axes to the object, padded by the requested km on each side.
    ax.set_extent([x-padding[0]*1000., x+padding[1]*1000., y-padding[2]*1000., y+padding[3]*1000.], crs=WRF_proj)
    track_id_int = int(trackid.split('_')[-1])
    step_id_int = int(stepid.split('_')[-1])
    # Contour object mask
    if not no_mask:
        # Find matching mask track id and step. For some reason, steps start with 1 in netCDF patches file
        matches = (mtrack_ids == track_id_int) & (mtrack_steps == step_id_int+1)
        ip = np.where(matches)[0][0]
        if not any(matches):
            pdb.set_trace()
        tolerance = 0.025 # TODO: figure out why centroid of csv object and nc patch differ at all
        if np.abs(lon-mask_centroid_lons[ip]) > tolerance:
            print(stepid,lon,mask_centroid_lons[ip])
        if np.abs(lat-mask_centroid_lats[ip]) > tolerance:
            print(stepid,lat,mask_centroid_lats[ip])
        mask = masks[ip]
        mlon = mlons[ip]
        mlat = mlats[ip]
        mcntr = ax.contour(mlon, mlat, mask, levels=[0,10], colors='black', alpha=0.6,
                linewidths=2., linestyles="solid", zorder=2, transform=cartopy.crs.PlateCarree())
    # Update axes labels (distance along axes).
    update_scale_labels(scale_xy)
    if arrow:
        # Storm motion vector points from previous location to present location.
        smv = ax.arrow(x-u, y-v, u, v, color=mcntr.colors, alpha=mcntr.get_alpha(), # Can't get head to show. Tried quiver, plot, head_width, head_length..., annotate...
                linewidth=1, zorder=2, capstyle='round', transform=WRF_proj) # tried length_includes_head=True, but zero-size gives ValueError about shape Nx2 needed.
    # Save image.
    plt.savefig(pngfile, dpi=175)
    print('created ' + os.path.realpath(pngfile))
    # Remove per-object artists so the next iteration starts clean.
    if arrow: smv.remove()
    # Remove object mask contour
    if not no_mask:
        for i in mcntr.collections: i.remove()
    if debug: pdb.set_trace()
plt.close(fig)
print("to sort -2 -1 +0 +1 +2 numerically:")
print("ls d01*png | sort -g -k 1."+str(len(stepid)+2))
print("to trim whitespace:")
print("convert -crop 980x1012+390+173 in.png out.png")
| 2.109375 | 2 |
bika/lims/exportimport/instruments/sysmex/xs/i500.py | hocinebendou/bika.gsoc | 0 | 12769318 | <reponame>hocinebendou/bika.gsoc<filename>bika/lims/exportimport/instruments/sysmex/xs/i500.py<gh_stars>0
""" Sysmex XS 500i
"""
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from . import SysmexXSImporter, SysmexXSCSVParser
import json
import traceback
# Human-readable name shown for this instrument import interface.
title = "Sysmex XS - 500i"
def getForm(instrument_name, request):
    """
    Since 500i and 1000i print the same results structure (https://jira.bikalabs.com/browse/LIMS-1571), this function
    will be overwrote on i1000 importer to save code.
    :param instrument_name: a string containing the instrument's name with the format: 'sysmex_xs_500i'
    :param request: the request object
    :return: a dictionary with the requests results.
    """
    form = request.form
    # Required fields raise KeyError when absent; optional ones fall back.
    return {
        'infile': form[instrument_name + '_file'],
        'fileformat': form[instrument_name + '_format'],
        'artoapply': form[instrument_name + '_artoapply'],
        'override': form[instrument_name + '_override'],
        'sample': form.get(instrument_name + '_sample', 'requestid'),
        'instrument': form.get(instrument_name + '_instrument', None),
    }
def Import(context, request, instrumentname='sysmex_xs_500i'):
    """Import Sysmex XS-500i analysis results from an uploaded CSV file.

    Reads the submitted form, builds the appropriate CSV parser, runs the
    importer against analysis requests matching the chosen states/criteria,
    and returns a JSON payload with ``errors``, ``log`` and ``warns`` lists.
    """
    # Original author's note: the file structure is not fully understood, so an
    # 'Analysis Service selector' was added. If no Analysis Service is selected,
    # each 'data' column is interpreted as a different Analysis Service;
    # otherwise all data columns are treated as data for that single service.
    formitems = getForm(instrumentname, request)
    infile = formitems['infile']
    fileformat = formitems['fileformat']
    artoapply = formitems['artoapply']
    override = formitems['override']
    sample = formitems['sample']
    instrument = formitems['instrument']
    errors = []
    logs = []
    warns = []
    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        # Get the Analysis Service selected, if there is one.
        analysis = request.form.get('analysis_service', None)
        if analysis:
            # Get default result key
            defaultresult = request.form.get('default_result', None)
            # A selected service without a default result key is an error;
            # note the ternary's else-branch appends the error and leaves
            # parser = None (errors.append returns None).
            parser = SysmexXS500iCSVParser(infile, analysis, defaultresult) if defaultresult \
                else errors.append(t(_("You should introduce a default result key.",
                                       mapping={"fileformat": fileformat})))
        else:
            parser = SysmexXS500iCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Map the form's 'apply to' choice onto allowed AR review states.
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        # over = [override existing results?, override with empty values?]
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        # Map the sample-id choice onto the catalog search criteria.
        # NOTE(review): the 'sampleid' branch uses `if` rather than `elif`,
        # so it re-tests after the 'requestid' branch — looks intentional but
        # confirm; with `elif` behavior would be identical here.
        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        if sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
        importer = SysmexXS500iImporter(parser=parser,
                                        context=context,
                                        idsearchcriteria=sam,
                                        allowed_ar_states=status,
                                        allowed_analysis_states=None,
                                        override=over,
                                        instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except:
            # Capture the traceback so it is reported to the user instead of
            # aborting the request.
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
class SysmexXS500iCSVParser(SysmexXSCSVParser):
    """CSV results parser specialised for the Sysmex XS 500i analyser."""

    def getAttachmentFileType(self):
        """Return the attachment file-type label for parsed 500i files."""
        file_type = "Sysmex XS 500i CSV"
        return file_type
class SysmexXS500iImporter(SysmexXSImporter):
    """Results importer specialised for the Sysmex XS 500i analyser."""

    def getKeywordsToBeExcluded(self):
        """Return analysis keywords to skip on import (none for this model)."""
        excluded = []
        return excluded
| 2.359375 | 2 |
BNB_Search/Board.py | Nurl4n/Artificial-Intelligence | 0 | 12769319 | <filename>BNB_Search/Board.py
import random
import copy
from copy import deepcopy
# How many puzzles get their solution printed board-by-board (the rest are
# printed as a plain move list).
MAX_PRINT = 2
# Counter of puzzles printed in full so far (mutated by success()).
CUR_PRINT = 0
# Board dimensions.
MAX_COLUMN = 4
MAX_ROW = 4
# Length of the most recently found solution path (set by success()).
solution_length_from_board = 0
# The solved configuration that the search works towards.
goal = [["1", "2", "3", "4"],
        ["2", "3", "4", "5"],
        ["3", "4", "5", "5"],
        ["4", "5", "5", "_"]]
# deepcopy (not aliasing or a shallow copy) so that mutating `board` can
# never touch the nested rows of `goal`.
board = deepcopy(goal)
emptyLoc = [MAX_ROW - 1, MAX_COLUMN - 1]  # store empty square position
# --------------------------------------------------------------
# --------------------------------------------------------------
# Print a board method
def print_board(board):
    """Print the board one row per line; returns "" so it can sit inside print()."""
    for r in range(MAX_ROW):
        row_text = ""
        for c in range(MAX_COLUMN):
            # Mirror the original `print(cell, end=" ")` layout: every cell is
            # followed by a single space (including the last one on the row).
            row_text += str(board[r][c]) + " "
        print(row_text)
    # Callers embed this in print(), so return an empty string.
    return ""
# --------------------------------------------------------------
# --------------------------------------------------------------
# Generating random boards
# We generate a board by getting a random move count between 5 and 10
# Then we use garble method to do that number of random moves
def gen_board():
    """Return a scrambled board: the goal board after 5-10 random blank moves."""
    shuffled = copy.deepcopy(board)
    # Same random call sequence as before: one randint for the move count,
    # then that many garble() moves.
    for _ in range(random.randint(5, 10)):
        shuffled = garble(shuffled)
    return shuffled
# --------------------------------------------------------------
# --------------------------------------------------------------
# Find index of value
def find_indexOf(board, value):
    """Return the (row, col) of the first cell equal to ``value``, or None."""
    for row_idx, row in enumerate(board):
        for col_idx, cell in enumerate(row):
            if cell == value:
                return (row_idx, col_idx)
    return None
# --------------------------------------------------------------
# --------------------------------------------------------------
# Now to find legal moves e.g. not out of bounds
# This method ifnds all the possible moves for the
# blank cell's current location. Menaing that
# they update with each move
# This method gets all possible moves of the empty cell
# and stores them in an array
def find_possible_moves(board):
    """Return the legal directions the empty cell '_' may move on ``board``.

    The order is always left, right, down, up (filtered by board bounds),
    matching what garble() expects when sampling a move.
    """
    row, col = find_indexOf(board, '_')
    # Pair each direction with its in-bounds condition and keep the legal ones.
    candidates = ((col > 0, 'left'),
                  (col < MAX_COLUMN - 1, 'right'),
                  (row < MAX_ROW - 1, 'down'),
                  (row > 0, 'up'))
    return [name for legal, name in candidates if legal]
# --------------------------------------------------------------
# --------------------------------------------------------------
# After getting all possible moves in an array
# this method picks one move randomly and executes
# it by calling the move method
def garble(board):
    """Apply one uniformly random legal blank-tile move and return the new board."""
    options = find_possible_moves(board)
    # Keep the original randint-based draw so the random call sequence
    # (and thus seeded behaviour) is unchanged.
    picked = options[random.randint(0, len(options) - 1)]
    return move(board, picked)
# --------------------------------------------------------------
# --------------------------------------------------------------
# Move a cell of given board to a given direction
def move(board, direction):
    """Return a copy of ``board`` with the blank tile ('_') moved one step.

    ``direction`` names where the blank travels: the tile at that neighbouring
    position is slid into the blank's old slot and '_' takes its place.

    :param board: 2D list of strings containing exactly one '_'.
    :param direction: one of 'up', 'down', 'left', 'right'; any other value
        returns an unchanged copy (matching the original lenient behaviour).
    :return: a new board (deep copy); the input board is never mutated.
    """
    # Location of Empty slot
    (ci, cj) = find_indexOf(board, '_')
    board_dup = copy.deepcopy(board)
    # Row/column offset of the neighbour that gets swapped with the blank.
    # Replaces four copy-pasted branches that differed only in this offset.
    offsets = {'up': (-1, 0), 'down': (1, 0), 'right': (0, 1), 'left': (0, -1)}
    if direction in offsets:
        di, dj = offsets[direction]
        ni, nj = ci + di, cj + dj
        # Slide the neighbour into the blank slot and leave '_' behind.
        board_dup[ci][cj] = board_dup[ni][nj]
        board_dup[ni][nj] = "_"
    return board_dup
# --------------------------------------------------------------
# --------------------------------------------------------------
def calculate_h2(state):
    """Return the h2 heuristic: total Manhattan distance of all tiles to ``goal``.

    A tile's Manhattan distance is |row - goal_row| + |col - goal_col|.
    The blank ('_') is excluded. For duplicated tile values, the last
    occurrence in reading order wins (same as the original dict building).
    """
    positions = {tile: (r, c)
                 for r, row in enumerate(state)
                 for c, tile in enumerate(row)}
    targets = {tile: (r, c)
               for r, row in enumerate(goal)
               for c, tile in enumerate(row)}
    total = 0
    for tile, (r, c) in positions.items():
        # Do not count the distance of the blank.
        if tile == "_":
            continue
        gr, gc = targets[tile]
        total += abs(r - gr) + abs(c - gc)
    return total
# --------------------------------------------------------------
# --------------------------------------------------------------
def success(board, node_dict, numOf_nodes_generated, print_solution=True):
    """Report a found solution: record its length and optionally print it.

    The first MAX_PRINT puzzles are printed board-by-board; later puzzles get
    a compact textual move list. Updates the module globals
    solution_length_from_board and CUR_PRINT as side effects.
    """
    # Once the solution has been found, prints the solution path and the length of the solution path
    global solution_length_from_board
    if len(node_dict) >= 1:
        # Find the final node from where we found the solution
        for node_num, node in node_dict.items():
            if node["state"] == goal:
                final_node = node_dict[node_num]
                break
        # Generate the solution path from the final node to the start node
        # (the path is seeded with the goal state so it appears in the output).
        solution_path = generate_solution_path(board, final_node, node_dict, path=[([["1", "2", "3","4"],["2", "3", "4","5"],["3", "4", "5","5"],["4", "5", "5","_"]], "goal")])
        # NOTE(review): -2 presumably excludes the seeded goal entry and the
        # start node from the move count — confirm against the search driver.
        solution_length = len(solution_path) - 2
        solution_length_from_board = solution_length
        print(solution_length_from_board)
    else:
        solution_path = []
        solution_length = 0
    # NOTE(review): this self-assignment is a no-op; kept for byte-identity.
    solution_path = solution_path
    if print_solution:
        # Display the length of solution and solution path
        print("Solution found!")
        print("Solution Length: ", solution_length)
        # For the first 2 puzzles we use this part to print their whole sequence as puzzles
        global CUR_PRINT
        if (CUR_PRINT < MAX_PRINT):
            # Walk the path backwards (it runs goal -> start) so boards print
            # in playing order.
            x = len(solution_path)-1
            while x > 0:
                print_board(solution_path[x][0])
                print("Move {:d}: ".format(len(solution_path)-(x+1)), "Empty tile moved: ",solution_path[x][1])
                print("")
                x -= 1
            CUR_PRINT = CUR_PRINT + 1
            print("Total nodes generated:", numOf_nodes_generated)
        # we use this part for others which prints only their solution sequence in text
        if(CUR_PRINT >= MAX_PRINT):
            # The solution path goes from final to start node. To display sequence of actions, reverse the solution path
            print("Solution Path", list(map(lambda x: x[1], solution_path[::-1])))
            print("Total nodes generated:", numOf_nodes_generated)
# --------------------------------------------------------------
# --------------------------------------------------------------
def generate_solution_path(board, node, node_dict, path):
    """Recursively build the solution path from the final node back to the root.

    Each path entry is a (state, action) tuple; entries are appended from goal
    towards start, so the returned list is in reverse playing order. ``path``
    is mutated in place and also returned.
    """
    # Return the solution path for display from final (goal) state to starting state
    # If the node is the root, return the path
    if node["parent"] == "root":
        # If root is found, add the node and then return
        path.append((node["state"], node["action"]))
        return path
    else:
        # If the node is not the root, add the state and action to the solution path
        state = node["state"]
        parent_state = node["parent"]
        action = node["action"]
        #score = node["h_score"]
        path.append((state, action))
        # Find the parent of the node and recurse.
        # NOTE(review): a linear scan of node_dict per level — assumes states
        # are unique across expanded nodes; confirm in the search driver.
        for node_num, expanded_node in node_dict.items():
            if expanded_node["state"] == parent_state:
                return generate_solution_path(board, expanded_node, node_dict, path)
def get_Sol_Len():
    """Return the length of the most recently solved puzzle's solution path."""
    return solution_length_from_board
| 3.9375 | 4 |
src/tokenization/vocab.py | mahsaghn/BBCNews | 0 | 12769320 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import Counter
import numpy as np
from itertools import chain
import json
import torch
from typing import List
import sentencepiece as spm
import os
def pad_sents(sents, pad_token):
    """Right-pad every sentence to the longest length, then transpose.

    :param sents: list of token lists (possibly ragged); must be non-empty.
    :param pad_token: token appended to shorter sentences.
    :return: list of numpy arrays in time-major layout — element t holds the
        t-th token of every sentence.
    """
    # (The original assigned sents_padded = [] and immediately overwrote it;
    # that dead assignment has been dropped.)
    maxlen = max(len(sent) for sent in sents)
    sents_padded = [sent + (maxlen - len(sent)) * [pad_token] for sent in sents]
    # Transpose so each entry corresponds to one time step across sentences.
    return list(np.array(sents_padded).T)
def read_corpus(file_path, source, vocab_size=2500):
    """Tokenize a text file into subword sequences using a SentencePiece model.

    Loads '{source}.model' and encodes every line; target-side ('tgt')
    sentences are wrapped with <s> ... </s> markers.

    :param file_path: path to the UTF-8 text file, one sentence per line.
    :param source: SentencePiece model prefix (also used as the side flag:
        the literal value 'tgt' triggers the sentence markers).
    :param vocab_size: NOTE(review) — unused in this function; presumably a
        leftover from an earlier signature. Confirm before removing.
    :return: list of token lists, one per input line.
    """
    data = []
    sp = spm.SentencePieceProcessor()
    sp.load('{}.model'.format(source))
    with open(file_path, 'r', encoding='utf8') as f:
        for line in f:
            subword_tokens = sp.encode_as_pieces(line)
            if source == 'tgt':
                subword_tokens = ['<s>'] + subword_tokens + ['</s>']
            data.append(subword_tokens)
    return data
class VocabEntry(object):
    """Vocabulary mapping between (sub)word tokens and integer ids.

    When built from scratch, ids 0-3 are reserved for <pad>, <s>, </s> and
    <unk>. The instance attribute ``id2word`` is the reverse lookup dict.
    """

    def __init__(self, word2id=None):
        """
        :param word2id: optional pre-built token->id dict (must contain
            '<unk>'); when omitted, a dict seeded with the four special
            tokens is created.
        """
        if word2id:
            self.word2id = word2id
        else:
            self.word2id = dict()
            self.word2id['<pad>'] = 0    # Pad Token
            self.word2id['<s>'] = 1      # Start Token
            self.word2id['</s>'] = 2     # End Token
            self.word2id['<unk>'] = 3    # Unknown Token
        self.unk_id = self.word2id['<unk>']
        # Reverse mapping id -> token (kept in sync by add()).
        self.id2word = {v: k for k, v in self.word2id.items()}

    # NOTE: the original class also defined a method ``id2word(self, wid)``.
    # It was unreachable dead code: the instance attribute of the same name
    # (set in __init__) always shadowed it, and any call raised TypeError
    # because dicts are not callable. It has been removed; use
    # ``self.id2word[wid]`` or indices2words() instead.

    def __getitem__(self, word):
        """Return the id of ``word``, falling back to the <unk> id."""
        if word in self.word2id:
            return self.word2id[word]
        return self.unk_id

    def __contains__(self, word):
        """True when ``word`` has an explicit id."""
        return word in self.word2id

    def __setitem__(self, key, value):
        """The vocabulary is read-only; use add() instead."""
        raise ValueError('vocabulary is readonly')

    def __len__(self):
        """Number of tokens (including the special tokens)."""
        return len(self.word2id)

    def __repr__(self):
        return 'Vocabulary[size=%d]' % len(self)

    def add(self, word):
        """Add ``word`` if unseen and return its id (existing id otherwise)."""
        if word not in self:
            wid = self.word2id[word] = len(self)
            self.id2word[wid] = word
            return wid
        else:
            return self[word]

    def words2indices(self, sents):
        """Map a single sentence (list of tokens) or a list of sentences to ids."""
        if type(sents[0]) == list:
            return [[self[w] for w in s] for s in sents]
        else:
            return [self[w] for w in sents]

    def indices2words(self, word_ids):
        """Map a list of ids back to their tokens."""
        return [self.id2word[w_id] for w_id in word_ids]

    def to_input_tensor(self, sents: List[List[str]], device: torch.device) -> torch.Tensor:
        """Convert sentences to a padded, time-major LongTensor on ``device``."""
        word_ids = self.words2indices(sents)
        sents_t = pad_sents(word_ids, self['<pad>'])
        sents_var = torch.tensor(sents_t, dtype=torch.long, device=device)
        return sents_var

    @staticmethod
    def from_corpus(corpus, size, freq_cutoff=2):
        """Build a vocabulary from a token corpus.

        :param corpus: iterable of token lists.
        :param size: keep at most this many of the most frequent tokens.
        :param freq_cutoff: drop tokens seen fewer times than this.
        """
        vocab_entry = VocabEntry()
        word_freq = Counter(chain(*corpus))
        valid_words = [w for w, v in word_freq.items() if v >= freq_cutoff]
        print('number of word types: {}, number of word types w/ frequency >= {}: {}'
              .format(len(word_freq), freq_cutoff, len(valid_words)))
        top_k_words = sorted(valid_words, key=lambda w: word_freq[w], reverse=True)[:size]
        for word in top_k_words:
            vocab_entry.add(word)
        return vocab_entry

    @staticmethod
    def from_subword_list(subword_list):
        """Build a vocabulary containing exactly the given subword tokens."""
        vocab_entry = VocabEntry()
        for subword in subword_list:
            vocab_entry.add(subword)
        return vocab_entry
class Vocab(object):
    """Thin wrapper holding a single source-side VocabEntry (``self.src``)."""

    def __init__(self, vocab: VocabEntry):
        """
        :param vocab: the VocabEntry to wrap.
        """
        self.src = vocab

    @staticmethod
    def build(sents) -> 'Vocab':
        """Build a Vocab whose entries are exactly the given subword tokens."""
        print('initialize vocabulary ..')
        src = VocabEntry.from_subword_list(sents)
        return Vocab(src)

    def save(self, file_path):
        """Serialize the word->id mapping to ``file_path`` as JSON."""
        if not os.path.exists('models/tokenization'):
            os.mkdir('models/tokenization')
        with open(file_path, 'w') as f:
            json.dump(dict(src_word2id=self.src.word2id), f, indent=2, ensure_ascii=False)

    @staticmethod
    def load(file_path):
        """Load a Vocab previously written by save().

        BUGFIX: save() stores the mapping under the key 'src_word2id', but
        load() used to read 'word2id' and therefore always raised KeyError.
        The file handle is now also closed deterministically.
        """
        with open(file_path, 'r') as f:
            entry = json.load(f)
        word2id = entry['src_word2id']
        return Vocab(VocabEntry(word2id))

    def __repr__(self):
        return 'Vocab(source %d words)' % (len(self.src))
def get_vocab_list(type_tokens,file_path_src, source, vocab_size):
    """Train a SentencePiece model and return its full piece (subword) list.

    :param type_tokens: 'word' trains a word-level model; anything else uses
        the SentencePiece default (unigram) model.
    :param file_path_src: training text file, one sentence per line.
    :param source: model prefix; '{source}.model' / '{source}.vocab' are written.
    :param vocab_size: target vocabulary size for training.
    :return: list of all pieces in the trained model, ordered by piece id.
    """
    if type_tokens == 'word':
        spm.SentencePieceTrainer.train(input=file_path_src, model_prefix=source, vocab_size=vocab_size,model_type='word',pad_id=0,unk_id=3,bos_id=-1) # train the spm model
    else:
        spm.SentencePieceTrainer.train(input=file_path_src, model_prefix=source, vocab_size=vocab_size,unk_id=3,bos_id=-1) # train the spm model
    sp = spm.SentencePieceProcessor() # create an instance; this saves .model and .vocab files
    sp.load('{}.model'.format(source)) # loads tgt.model or src.model
    sp_list = [sp.id_to_piece(piece_id) for piece_id in range(sp.get_piece_size())] # this is the list of subwords
    return sp_list
def generate_five_set(src_path,dest_path):
    """Write five shuffled 80/20 train/dev splits of ``src_path`` into ``dest_path``.

    Produces sentences_train{1..5}.txt and sentences_dev{1..5}.txt. Each split
    reshuffles the full corpus, so the five splits overlap.

    NOTE(review): the files are opened in append mode ('a'), so re-running
    this function on an existing working dir duplicates lines — presumably
    it is only ever called on a freshly created directory; confirm.
    """
    # save 5 shuffled file in temp directory
    all_text = []
    with open(src_path, 'r') as sent_file:
        all_text = [text for text in sent_file]
    sen_num = len(all_text)
    # 80% of the sentences go to the training split.
    train_num = int(0.8*sen_num)
    all_text = np.array(all_text)
    for i in range(1, 6):
        np.random.shuffle(all_text)
        with open(dest_path+'/sentences_train{}.txt'.format(i), 'a') as out_file:
            out_file.writelines(all_text[:train_num])
        with open(dest_path+'/sentences_dev{}.txt'.format(i), 'a') as out_file:
            out_file.writelines(all_text[train_num:])
# def eval_dev(dest_path,source):
# sp = spm.SentencePieceProcessor() # create an instance; this saves .model and .vocab files
# sp.load('{}.model'.format(source)) # loads tgt.model or src.model
# sent_dev = []
# with open(dest_path, 'r') as outfile:
# sent_dev = [sent for sent in outfile]
# tokenized = sp.Encode(input=sent_dev, out_type=int)
# return tokenized
if __name__ == '__main__':
    # Step 1: build a latin-character-free copy of the corpus (once).
    if not os.path.exists('data/dataset_noen.txt'):
        with open('data/dataset_sentences.txt','r') as withen_file:
            all_text = [txt.lower() for txt in withen_file]
        # Characters to strip: lowercase latin letters plus a few punctuation
        # marks. NOTE(review): the list contains '"' twice and omits nothing
        # else obvious — presumably intentional; confirm.
        en = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','t','u','s','v','w','x','y','z', '"', '"', '»', '«', '/']
        print(len(en))
        cleaned = []
        for txt in all_text:
            newtxt = txt
            for e in en:
                newtxt = newtxt.replace(e,'')
            # Also drop the SentencePiece word-boundary marker.
            newtxt = newtxt.replace('▁','')
            cleaned.append(newtxt)
        with open('data/dataset_noen.txt', 'a') as cleaned_file:
            cleaned_file.writelines(cleaned)
    src = 'data/dataset_noen.txt'
    dest = 'src/tokenization/working_dir'
    vocab_sizes = [15000,10000,5000,1000]
    # Step 2: create the working dirs and the five train/dev splits (once).
    if not os.path.exists('src/tokenization/working_dir'):
        os.mkdir('src/tokenization/working_dir')
        generate_five_set(src, dest)
    if not os.path.exists('src/tokenization/working_dir/outs'):
        os.mkdir('src/tokenization/working_dir/outs')
    if not os.path.exists('src/tokenization/working_dir/words_outs'):
        os.mkdir('src/tokenization/working_dir/words_outs')
    type_tokens = 'word' #'word'/'subword'
    out_dir_name = 'outs' if type_tokens == 'subword' else 'words_outs'
    model_reports = []
    # Step 3: for each vocab size and each of the five splits, train a model,
    # save its vocabulary, and measure the <unk> rate on the dev split.
    for vocab_size in vocab_sizes:
        for i in range(1, 6):
            if not os.path.exists(dest + '/{}/{}_{}'.format(out_dir_name,i, vocab_size)):
                os.mkdir(dest + '/{}/{}_{}'.format(out_dir_name,i, vocab_size))
            sent_path_train = dest+'/sentences_train{}.txt'.format(i)
            sent_path_dev = dest + '/sentences_dev{}.txt'.format(i)
            sents = get_vocab_list(type_tokens,sent_path_train, source=dest+'/{}/{}_{}/src{}_{}'.format(out_dir_name,i, vocab_size,i, vocab_size), vocab_size=vocab_size)
            vocab = Vocab.build(sents)
            final_vocab_file = dest + '/{}/{}_{}/vocab_file.json'.format(out_dir_name,i, vocab_size)
            vocab.save(final_vocab_file)
            # Tokenize the dev split with the trained vocabulary; the '▁'
            # prefix mimics SentencePiece's word-boundary marker.
            with open( 'src/tokenization/working_dir/sentences_dev{}.txt'.format(i), 'r') as dev_data_file:
                dev_sents = [['▁'+de for de in d.split(' ') ] for d in dev_data_file]
            dev_token = vocab.src.words2indices(dev_sents)
            dev_token_np = []
            for devtok in dev_token:
                for tok in devtok:
                    dev_token_np.append(tok)
            dev_token_np = np.array(dev_token_np)
            # Id 3 is <unk>; count out-of-vocabulary tokens.
            unk_num = np.count_nonzero(dev_token_np == 3)
            model_reports.append("-Vocab size:{}\ti:{}\tNum tokens= {}\tNum unks:{}\tunkp:{}\n"\
                                 .format(vocab_size,i,dev_token_np.shape[0],100*unk_num,unk_num/dev_token_np.shape[0]))
            with open(dest + '/{}/{}_{}/dev.json'.format(out_dir_name,i,vocab_size) , 'a') as devfile:
                json.dump(dict(tokens=dev_token), devfile, ensure_ascii=False)
    with open("reports/word2vec_{}.txt".format(type_tokens),'a') as report_file:
        report_file.writelines(model_reports)
    # Step 4: train a final model on the full corpus and save its vocabulary.
    # NOTE(review): vocab_sizes is rebound to a scalar but the call below uses
    # `vocab_size` (the loop variable, last value 1000) — looks like a bug;
    # confirm whether 10000 was intended here.
    vocab_sizes = 10000
    sents = get_vocab_list(type_tokens,src, source='models/tokenization/src{}'.format(type_tokens), vocab_size=vocab_size)
    vocab = Vocab.build(sents)
    final_vocab_file = 'models/tokenization/vocab_file_{}.json'.format(type_tokens)
    vocab.save(final_vocab_file)
| 2.421875 | 2 |
slimevolleygym/__init__.py | mgoulao/2v2-Slime-Volleyball | 2 | 12769321 | import slimevolleygym.game_settings
import slimevolleygym.slimevolley
from slimevolleygym.slimevolley import *
| 1.15625 | 1 |
utils/optimizer.py | wufanyou/Traffic4Cast-2020-TLab | 3 | 12769322 | from apex.optimizers import FusedLAMB, FusedAdam
from torch.optim.lr_scheduler import LambdaLR
from torch.optim import AdamW, Adam, SGD
import math
# https://huggingface.co/transformers/_modules/transformers/optimization.html#get_linear_schedule_with_warmup
def get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, last_epoch=-1
):
    """
    Linear warmup followed by linear decay to zero.

    The learning rate grows linearly from 0 to the optimizer's initial lr over
    ``num_warmup_steps`` steps, then decreases linearly back to 0 at
    ``num_training_steps``.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`): optimizer to schedule.
        num_warmup_steps (:obj:`int`): number of warmup steps.
        num_training_steps (:obj:`int`): total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1): index of the last
            epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        # Warmup phase: multiplier ramps from 0 towards 1.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        # Decay phase: remaining fraction of the post-warmup span, floored at 0.
        remaining = num_training_steps - current_step
        span = max(1, num_training_steps - num_warmup_steps)
        return max(0.0, remaining / span)

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """
    Linear warmup followed by a cosine decay.

    The learning rate grows linearly from 0 to the optimizer's initial lr over
    ``num_warmup_steps`` steps, then follows a cosine curve down to 0 (for the
    default ``num_cycles=0.5``, a single half-cosine over the remaining steps).

    Args:
        optimizer (:class:`~torch.optim.Optimizer`): optimizer to schedule.
        num_warmup_steps (:obj:`int`): number of warmup steps.
        num_training_steps (:obj:`int`): total number of training steps.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5): number of
            cosine waves in the schedule.
        last_epoch (:obj:`int`, `optional`, defaults to -1): index of the last
            epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        # Warmup phase: multiplier ramps from 0 towards 1.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        # Cosine phase: progress in [0, 1] over the post-warmup span.
        elapsed = current_step - num_warmup_steps
        span = max(1, num_training_steps - num_warmup_steps)
        progress = elapsed / span
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_exponent_schedule_with_warmup(
    optimizer,
    num_warmup_steps: int,
    exponent: float = 1 - 2e-3,
    step: int = 10,
    last_epoch: int = -1,
):
    """
    Linear warmup followed by step-wise exponential decay.

    After warmup the lr multiplier is ``exponent ** k`` where ``k`` increases
    by one every ``step`` scheduler steps.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`): optimizer to schedule.
        num_warmup_steps (:obj:`int`): number of warmup steps.
        exponent (:obj:`float`, `optional`): per-period decay factor.
        step (:obj:`int`, `optional`): scheduler steps per decay period.
        last_epoch (:obj:`int`, `optional`, defaults to -1): index of the last
            epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        # Warmup phase: multiplier ramps from 0 towards 1.
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        # Decay phase: one decay period every `step` scheduler steps.
        decay_periods = (current_step - num_warmup_steps) // step
        return exponent ** decay_periods

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_optim(cfg, model, dataset_iter_num):
    """Build the optimizer and (optionally) LR scheduler from the config.

    :param cfg: configuration object; its OPTIM section is read (NAME,
        INIT_LR, ADAM_EPSILON, SGD_MOMENTUM, WARM_UP_EPOCH, MAX_EPOCH,
        USE_LR_SCHEDULER, LR_SCHEDULER_TYPE, EXPONENT).
    :param model: model whose trainable parameters are optimized.
    :param dataset_iter_num: iterations per epoch, used to convert the
        epoch-based warmup/max settings into step counts.
    :return: (optimizer, scheduler) — scheduler is None when disabled.
    """
    cfg = cfg.OPTIM
    optim_name = cfg.NAME
    optimizer = None
    assert optim_name in ["FusedLAMB", "AdamW", "Adam", "SGD"], "optimizer not allowed"
    # Only parameters with requires_grad are handed to the optimizer.
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if optim_name == "FusedLAMB":
        optimizer = FusedLAMB(parameters, lr=cfg.INIT_LR, eps=cfg.ADAM_EPSILON)
    if optim_name == "AdamW":
        optimizer = AdamW(parameters, lr=cfg.INIT_LR, eps=cfg.ADAM_EPSILON)
    if optim_name == "Adam":
        optimizer = Adam(parameters, lr=cfg.INIT_LR, eps=cfg.ADAM_EPSILON)
    if optim_name == "SGD":
        optimizer = SGD(parameters, lr=cfg.INIT_LR, momentum=cfg.SGD_MOMENTUM)
    # Convert epoch-based settings to scheduler step counts.
    warmup_step = int(cfg.WARM_UP_EPOCH * dataset_iter_num)
    max_step = cfg.MAX_EPOCH * dataset_iter_num
    if cfg.USE_LR_SCHEDULER:
        if cfg.LR_SCHEDULER_TYPE == "get_exponent_schedule_with_warmup":
            # The exponent schedule has a different signature (no max_step).
            scheduler = get_exponent_schedule_with_warmup(
                optimizer, warmup_step, exponent=cfg.EXPONENT
            )
        else:
            # Resolve the factory by name from this module's globals.
            scheduler = globals()[cfg.LR_SCHEDULER_TYPE](
                optimizer, warmup_step, max_step
            )
    else:
        scheduler = None
    return optimizer, scheduler
| 2.90625 | 3 |
3rdparty/packages/all.py | cspanier/shift | 2 | 12769323 | #!/usr/bin/env python3
import sys
from build import Builder
# Name of this meta-package, derived from this script's filename.
package_name = Builder.package_name_from_filename(__file__)

# Version-pinned third-party packages that this meta-package aggregates.
dependencies = (
    'zlib-1.2.11',
    'bzip2-1.0.8',
    'jpeg-9c',
    'zstd-be3bd70',
    'tiff-4.0.10',
    'icu4c-65_1',
    'boost_1_71_0',
)
def prepare(builder):
    """Prepare step for this meta-package: nothing to do, always succeeds."""
    return True
def build(builder):
    """Build step for this meta-package: nothing to do, always succeeds."""
    return True
def cleanup(builder):
    """Cleanup step for this meta-package: nothing to do, always succeeds."""
    return True
if __name__ == "__main__":
    # This module is a build-system plug-in; it must be driven by the builder,
    # never executed directly.
    print('You must not call this script directly.')
    sys.exit(1)
| 1.960938 | 2 |
main/mad_libs.py | TechnicallyMay/mad_libs_generator | 0 | 12769324 | <filename>main/mad_libs.py
import find_pos
from random import choice
from collections import defaultdict
class MadLibs():
    """A mad-libs game built from a text file under ../data/.

    On construction the story text is loaded, part-of-speech tagged via
    find_pos.tag_pos, and replaceable words are grouped by a human-readable
    POS label in ``self.pos``.
    """

    def __init__(self, file_name):
        self.file_name = "../data/" + file_name + ".txt"
        self.text = self.get_text()
        tagged = find_pos.tag_pos(self.text)
        self.pos = self.find_important_pos(tagged)

    def get_text(self):
        """Read the source file and collapse newlines into spaces."""
        with open(self.file_name, 'r') as source:
            return source.read().replace("\n", " ")

    def find_important_pos(self, tagged):
        """Group tagged words under player-facing POS labels.

        :param tagged: mapping of Penn Treebank tag -> list of words.
        :return: defaultdict(list) keyed by labels such as 'Singular Noun'.
        """
        grouped = defaultdict(list)
        # Exact-tag labels first; remaining tags fall back to prefix rules.
        exact = {"NN": "Singular Noun",
                 "NNS": "Plural Noun",
                 "NNP": "Proper Noun",
                 "NNPS": "Proper Noun",
                 "VBD": "Past Verb",
                 "VBN": "Past Verb"}
        for tag, words in tagged.items():
            if tag in exact:
                label = exact[tag]
            elif tag[0] == "V":
                label = "Present Verb"
            elif tag[0] == "J":
                label = "Adjective"
            else:
                # Uninteresting part of speech: not offered to the player.
                continue
            grouped[label].extend(words)
        return grouped

    def pick_word(self):
        """Remove and return one random (pos_label, word) pair.

        Categories that run out of words are deleted so they are never
        offered again.
        """
        label = choice(list(self.pos.keys()))
        word = choice(self.pos[label])
        remaining = [w for w in self.pos[label] if w != word]
        self.pos[label] = remaining
        if not remaining:
            del(self.pos[label])
        return (label, word)

    def replace_word(self, to_replace, input):
        """Substitute every occurrence of ``to_replace`` in the story text.

        Capitalisation and a trailing comma on the original word are carried
        over onto the replacement.
        """
        tokens = self.text.split()
        for idx, token in enumerate(tokens):
            replacement = input
            if token[0].isupper():
                replacement = replacement.capitalize()
            if token[-1] == ',':
                token = token.replace(",", "")
                replacement += ","
            if token.lower() == to_replace:
                tokens[idx] = replacement
        self.text = " ".join(tokens)

    def print(self):
        """Print the story, starting a new line before each capitalised word
        (except 'I' and "I'm")."""
        for token in self.text.split():
            if token[0].isupper() and token != "I" and token != "I'm":
                print('\n' + token, end=" ")
            else:
                print(token, end=" ")
| 3.203125 | 3 |
tools/spaceSwitcher/python/spaceswitcher/ui.py | koborit/SpaceSwitcherSample | 0 | 12769325 | """
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Documentation
Sub module for UI implementation.
"""
from pymel.core import *
import maya.OpenMayaUI as OpenMayaUI
from Qt import QtCore, QtGui, QtWidgets
from Qt.QtCore import Slot
try:
from shiboken2 import wrapInstance
except:
from shiboken import wrapInstance
from . import core
class Ui_SpaceSwitcherWindow(object):
    """Widget layout for the SpaceSwitcher main window.

    Qt-Designer-style boilerplate: ``setupUi`` builds every widget, layout
    and size policy of the tool window and assigns them as attributes on
    this object.  No signal connections are made here — that is done by
    ``ControlMainWindow``.
    """

    def setupUi(self, SpaceSwitcherWindow):
        """Create and lay out all widgets on *SpaceSwitcherWindow*.

        SpaceSwitcherWindow -- the QMainWindow instance to populate.
        """
        # --- fixed-size main window ---
        SpaceSwitcherWindow.setObjectName("SpaceSwitcherWindow")
        SpaceSwitcherWindow.setWindowModality(QtCore.Qt.NonModal)
        SpaceSwitcherWindow.resize(246, 256)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(SpaceSwitcherWindow.sizePolicy().hasHeightForWidth())
        SpaceSwitcherWindow.setSizePolicy(sizePolicy)
        SpaceSwitcherWindow.setMinimumSize(QtCore.QSize(246, 256))
        SpaceSwitcherWindow.setMaximumSize(QtCore.QSize(246, 256))
        SpaceSwitcherWindow.setWindowTitle("SpaceSwitcher")
        SpaceSwitcherWindow.setWindowOpacity(1.0)
        SpaceSwitcherWindow.setToolTip("")
        SpaceSwitcherWindow.setTabShape(QtWidgets.QTabWidget.Rounded)

        # --- central widget and root layout ---
        self.centralWidget = QtWidgets.QWidget(SpaceSwitcherWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())
        self.centralWidget.setSizePolicy(sizePolicy)
        self.centralWidget.setMinimumSize(QtCore.QSize(0, 0))
        self.centralWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.centralWidget.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.centralWidget.setObjectName("centralWidget")
        self.layout_centralWidget = QtWidgets.QHBoxLayout(self.centralWidget)
        self.layout_centralWidget.setSpacing(2)
        self.layout_centralWidget.setContentsMargins(2, 2, 2, 2)
        self.layout_centralWidget.setObjectName("layout_centralWidget")
        self.frame_Root = QtWidgets.QFrame(self.centralWidget)
        self.frame_Root.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_Root.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_Root.setObjectName("frame_Root")
        self.layout_Root = QtWidgets.QVBoxLayout(self.frame_Root)
        self.layout_Root.setSpacing(2)
        self.layout_Root.setContentsMargins(2, 2, 2, 2)
        self.layout_Root.setObjectName("layout_Root")

        # --- "Parent" group: label, Set/Clear buttons, name field ---
        self.frame_Parent = QtWidgets.QFrame(self.frame_Root)
        self.frame_Parent.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_Parent.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_Parent.setObjectName("frame_Parent")
        self.layout_Parent = QtWidgets.QVBoxLayout(self.frame_Parent)
        self.layout_Parent.setSpacing(4)
        self.layout_Parent.setContentsMargins(2, 2, 2, 2)
        self.layout_Parent.setObjectName("layout_Parent")
        self.frame_LabelAndButton = QtWidgets.QFrame(self.frame_Parent)
        self.frame_LabelAndButton.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_LabelAndButton.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_LabelAndButton.setObjectName("frame_LabelAndButton")
        self.layout_LabelAndButton = QtWidgets.QHBoxLayout(self.frame_LabelAndButton)
        self.layout_LabelAndButton.setSpacing(2)
        self.layout_LabelAndButton.setContentsMargins(0, 0, 0, 0)
        self.layout_LabelAndButton.setObjectName("layout_LabelAndButton")
        self.label_Parent = QtWidgets.QLabel(self.frame_LabelAndButton)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_Parent.sizePolicy().hasHeightForWidth())
        self.label_Parent.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_Parent.setFont(font)
        self.label_Parent.setText("Parent")
        self.label_Parent.setObjectName("label_Parent")
        self.layout_LabelAndButton.addWidget(self.label_Parent)
        self.pushButton_SetParent = QtWidgets.QPushButton(self.frame_LabelAndButton)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_SetParent.sizePolicy().hasHeightForWidth())
        self.pushButton_SetParent.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.pushButton_SetParent.setFont(font)
        self.pushButton_SetParent.setText("Set")
        self.pushButton_SetParent.setObjectName("pushButton_SetParent")
        self.layout_LabelAndButton.addWidget(self.pushButton_SetParent)
        self.pushButton_ClearParent = QtWidgets.QPushButton(self.frame_LabelAndButton)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_ClearParent.sizePolicy().hasHeightForWidth())
        self.pushButton_ClearParent.setSizePolicy(sizePolicy)
        self.pushButton_ClearParent.setMaximumSize(QtCore.QSize(52, 16777215))
        self.pushButton_ClearParent.setText("Clear")
        self.pushButton_ClearParent.setObjectName("pushButton_ClearParent")
        self.layout_LabelAndButton.addWidget(self.pushButton_ClearParent)
        self.layout_Parent.addWidget(self.frame_LabelAndButton)
        self.lineEdit_Parent = QtWidgets.QLineEdit(self.frame_Parent)
        self.lineEdit_Parent.setText("")
        self.lineEdit_Parent.setObjectName("lineEdit_Parent")
        self.layout_Parent.addWidget(self.lineEdit_Parent)
        self.layout_Root.addWidget(self.frame_Parent)

        # --- "Create Constraint" group: axis check boxes + two buttons ---
        self.frame_CreateConstraint = QtWidgets.QFrame(self.frame_Root)
        self.frame_CreateConstraint.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_CreateConstraint.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_CreateConstraint.setObjectName("frame_CreateConstraint")
        self.layout_CreateConstraint = QtWidgets.QVBoxLayout(self.frame_CreateConstraint)
        self.layout_CreateConstraint.setSpacing(0)
        self.layout_CreateConstraint.setContentsMargins(2, 2, 2, 2)
        self.layout_CreateConstraint.setObjectName("layout_CreateConstraint")
        self.label_CreateConstraint = QtWidgets.QLabel(self.frame_CreateConstraint)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_CreateConstraint.sizePolicy().hasHeightForWidth())
        self.label_CreateConstraint.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_CreateConstraint.setFont(font)
        self.label_CreateConstraint.setToolTip("Create constraints: Select nodes to be constrained")
        self.label_CreateConstraint.setText("Create Constraint")
        self.label_CreateConstraint.setObjectName("label_CreateConstraint")
        self.layout_CreateConstraint.addWidget(self.label_CreateConstraint)
        # translate axis row (X/Y/Z check boxes, all on by default)
        self.frame_TranslateCheckBoxes = QtWidgets.QFrame(self.frame_CreateConstraint)
        self.frame_TranslateCheckBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_TranslateCheckBoxes.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_TranslateCheckBoxes.setObjectName("frame_TranslateCheckBoxes")
        self.layout_TranslateCheckBoxes = QtWidgets.QHBoxLayout(self.frame_TranslateCheckBoxes)
        self.layout_TranslateCheckBoxes.setSpacing(8)
        self.layout_TranslateCheckBoxes.setContentsMargins(0, 6, 0, 0)
        self.layout_TranslateCheckBoxes.setObjectName("layout_TranslateCheckBoxes")
        self.label_Translate = QtWidgets.QLabel(self.frame_TranslateCheckBoxes)
        self.label_Translate.setText("Translate")
        self.label_Translate.setObjectName("label_Translate")
        self.layout_TranslateCheckBoxes.addWidget(self.label_Translate)
        self.checkBox_TranslateX = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_TranslateX.sizePolicy().hasHeightForWidth())
        self.checkBox_TranslateX.setSizePolicy(sizePolicy)
        self.checkBox_TranslateX.setText("X")
        self.checkBox_TranslateX.setChecked(True)
        self.checkBox_TranslateX.setObjectName("checkBox_TranslateX")
        self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateX)
        self.checkBox_TranslateY = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_TranslateY.sizePolicy().hasHeightForWidth())
        self.checkBox_TranslateY.setSizePolicy(sizePolicy)
        self.checkBox_TranslateY.setText("Y")
        self.checkBox_TranslateY.setChecked(True)
        self.checkBox_TranslateY.setObjectName("checkBox_TranslateY")
        self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateY)
        self.checkBox_TranslateZ = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_TranslateZ.sizePolicy().hasHeightForWidth())
        self.checkBox_TranslateZ.setSizePolicy(sizePolicy)
        self.checkBox_TranslateZ.setText("Z")
        self.checkBox_TranslateZ.setChecked(True)
        self.checkBox_TranslateZ.setObjectName("checkBox_TranslateZ")
        self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateZ)
        self.layout_CreateConstraint.addWidget(self.frame_TranslateCheckBoxes)
        # rotate axis row (X/Y/Z check boxes, all on by default)
        self.frame_RotateCheckBoxes = QtWidgets.QFrame(self.frame_CreateConstraint)
        self.frame_RotateCheckBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_RotateCheckBoxes.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_RotateCheckBoxes.setObjectName("frame_RotateCheckBoxes")
        self.layout_RotateCheckBoxes = QtWidgets.QHBoxLayout(self.frame_RotateCheckBoxes)
        self.layout_RotateCheckBoxes.setSpacing(8)
        self.layout_RotateCheckBoxes.setContentsMargins(0, 0, 0, 0)
        self.layout_RotateCheckBoxes.setObjectName("layout_RotateCheckBoxes")
        self.label_Rotate = QtWidgets.QLabel(self.frame_RotateCheckBoxes)
        self.label_Rotate.setText("Rotate")
        self.label_Rotate.setObjectName("label_Rotate")
        self.layout_RotateCheckBoxes.addWidget(self.label_Rotate)
        self.checkBox_RotateX = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_RotateX.sizePolicy().hasHeightForWidth())
        self.checkBox_RotateX.setSizePolicy(sizePolicy)
        self.checkBox_RotateX.setText("X")
        self.checkBox_RotateX.setChecked(True)
        self.checkBox_RotateX.setObjectName("checkBox_RotateX")
        self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateX)
        self.checkBox_RotateY = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_RotateY.sizePolicy().hasHeightForWidth())
        self.checkBox_RotateY.setSizePolicy(sizePolicy)
        self.checkBox_RotateY.setText("Y")
        self.checkBox_RotateY.setChecked(True)
        self.checkBox_RotateY.setObjectName("checkBox_RotateY")
        self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateY)
        self.checkBox_RotateZ = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.checkBox_RotateZ.sizePolicy().hasHeightForWidth())
        self.checkBox_RotateZ.setSizePolicy(sizePolicy)
        self.checkBox_RotateZ.setText("Z")
        self.checkBox_RotateZ.setChecked(True)
        self.checkBox_RotateZ.setObjectName("checkBox_RotateZ")
        self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateZ)
        self.layout_CreateConstraint.addWidget(self.frame_RotateCheckBoxes)
        # Create / Create-and-Bake buttons
        self.frame_CreateConstraintButtons = QtWidgets.QFrame(self.frame_CreateConstraint)
        self.frame_CreateConstraintButtons.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_CreateConstraintButtons.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_CreateConstraintButtons.setObjectName("frame_CreateConstraintButtons")
        self.layout_CreateConstraintButtons = QtWidgets.QHBoxLayout(self.frame_CreateConstraintButtons)
        self.layout_CreateConstraintButtons.setSpacing(2)
        self.layout_CreateConstraintButtons.setContentsMargins(0, 0, 0, 0)
        self.layout_CreateConstraintButtons.setObjectName("layout_CreateConstraintButtons")
        self.pushButton_CreateConstraint = QtWidgets.QPushButton(self.frame_CreateConstraintButtons)
        self.pushButton_CreateConstraint.setToolTip("")
        self.pushButton_CreateConstraint.setText("Create")
        self.pushButton_CreateConstraint.setObjectName("pushButton_CreateConstraint")
        self.layout_CreateConstraintButtons.addWidget(self.pushButton_CreateConstraint)
        self.pushButton_CreateAndBakeConstraint = QtWidgets.QPushButton(self.frame_CreateConstraintButtons)
        self.pushButton_CreateAndBakeConstraint.setText("Create and Bake")
        self.pushButton_CreateAndBakeConstraint.setObjectName("pushButton_CreateAndBakeConstraint")
        self.layout_CreateConstraintButtons.addWidget(self.pushButton_CreateAndBakeConstraint)
        self.layout_CreateConstraint.addWidget(self.frame_CreateConstraintButtons)
        self.layout_Root.addWidget(self.frame_CreateConstraint)

        # --- "Delete Constraint" group: Delete / Bake-and-Delete buttons ---
        self.frame_DeleteConstraint = QtWidgets.QFrame(self.frame_Root)
        self.frame_DeleteConstraint.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_DeleteConstraint.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_DeleteConstraint.setObjectName("frame_DeleteConstraint")
        self.layout_DeleteConstraint = QtWidgets.QVBoxLayout(self.frame_DeleteConstraint)
        self.layout_DeleteConstraint.setSpacing(0)
        self.layout_DeleteConstraint.setContentsMargins(2, 2, 2, 2)
        self.layout_DeleteConstraint.setObjectName("layout_DeleteConstraint")
        self.label_DeleteConstraint = QtWidgets.QLabel(self.frame_DeleteConstraint)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_DeleteConstraint.sizePolicy().hasHeightForWidth())
        self.label_DeleteConstraint.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_DeleteConstraint.setFont(font)
        self.label_DeleteConstraint.setToolTip("Delete constraints: Select constraining locators")
        self.label_DeleteConstraint.setText("Delete Constraint")
        self.label_DeleteConstraint.setObjectName("label_DeleteConstraint")
        self.layout_DeleteConstraint.addWidget(self.label_DeleteConstraint)
        self.frame_DeleteConstraintButtons = QtWidgets.QFrame(self.frame_DeleteConstraint)
        self.frame_DeleteConstraintButtons.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_DeleteConstraintButtons.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_DeleteConstraintButtons.setObjectName("frame_DeleteConstraintButtons")
        self.layout_DeleteConstraintButtons = QtWidgets.QHBoxLayout(self.frame_DeleteConstraintButtons)
        self.layout_DeleteConstraintButtons.setSpacing(2)
        self.layout_DeleteConstraintButtons.setContentsMargins(0, 4, 0, 0)
        self.layout_DeleteConstraintButtons.setObjectName("layout_DeleteConstraintButtons")
        self.pushButton_DeleteConstraint = QtWidgets.QPushButton(self.frame_DeleteConstraintButtons)
        self.pushButton_DeleteConstraint.setToolTip("")
        self.pushButton_DeleteConstraint.setText("Delete")
        self.pushButton_DeleteConstraint.setObjectName("pushButton_DeleteConstraint")
        self.layout_DeleteConstraintButtons.addWidget(self.pushButton_DeleteConstraint)
        self.pushButton_BakeAndDeleteConstraint = QtWidgets.QPushButton(self.frame_DeleteConstraintButtons)
        self.pushButton_BakeAndDeleteConstraint.setText("Bake and Delete")
        self.pushButton_BakeAndDeleteConstraint.setObjectName("pushButton_BakeAndDeleteConstraint")
        self.layout_DeleteConstraintButtons.addWidget(self.pushButton_BakeAndDeleteConstraint)
        self.layout_DeleteConstraint.addWidget(self.frame_DeleteConstraintButtons)
        self.layout_Root.addWidget(self.frame_DeleteConstraint)

        # --- "Bake Range" group: start/end frame spin boxes ---
        self.frame_BakeRange = QtWidgets.QFrame(self.frame_Root)
        self.frame_BakeRange.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_BakeRange.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_BakeRange.setObjectName("frame_BakeRange")
        self.layout_BakeRange = QtWidgets.QVBoxLayout(self.frame_BakeRange)
        self.layout_BakeRange.setSpacing(0)
        self.layout_BakeRange.setContentsMargins(2, 2, 2, 2)
        self.layout_BakeRange.setObjectName("layout_BakeRange")
        self.frame_BakeRangeTop = QtWidgets.QFrame(self.frame_BakeRange)
        self.frame_BakeRangeTop.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_BakeRangeTop.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_BakeRangeTop.setObjectName("frame_BakeRangeTop")
        self.layout_BakeRangeTop = QtWidgets.QHBoxLayout(self.frame_BakeRangeTop)
        self.layout_BakeRangeTop.setSpacing(0)
        self.layout_BakeRangeTop.setContentsMargins(0, 0, 0, 0)
        self.layout_BakeRangeTop.setObjectName("layout_BakeRangeTop")
        self.label_BakeRange = QtWidgets.QLabel(self.frame_BakeRangeTop)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_BakeRange.sizePolicy().hasHeightForWidth())
        self.label_BakeRange.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setWeight(75)
        font.setBold(True)
        self.label_BakeRange.setFont(font)
        self.label_BakeRange.setText("Bake Range")
        self.label_BakeRange.setObjectName("label_BakeRange")
        self.layout_BakeRangeTop.addWidget(self.label_BakeRange)
        self.pushButton_SetFromTimeline = QtWidgets.QPushButton(self.frame_BakeRangeTop)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButton_SetFromTimeline.sizePolicy().hasHeightForWidth())
        self.pushButton_SetFromTimeline.setSizePolicy(sizePolicy)
        self.pushButton_SetFromTimeline.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.pushButton_SetFromTimeline.setText("Set from timeline")
        self.pushButton_SetFromTimeline.setObjectName("pushButton_SetFromTimeline")
        self.layout_BakeRangeTop.addWidget(self.pushButton_SetFromTimeline)
        self.layout_BakeRange.addWidget(self.frame_BakeRangeTop)
        self.frame_BakeRangeSpinBoxes = QtWidgets.QFrame(self.frame_BakeRange)
        self.frame_BakeRangeSpinBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_BakeRangeSpinBoxes.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_BakeRangeSpinBoxes.setObjectName("frame_BakeRangeSpinBoxes")
        self.layout_BakeRangeSpinBoxes = QtWidgets.QHBoxLayout(self.frame_BakeRangeSpinBoxes)
        self.layout_BakeRangeSpinBoxes.setSpacing(2)
        self.layout_BakeRangeSpinBoxes.setContentsMargins(0, 4, 0, 0)
        self.layout_BakeRangeSpinBoxes.setObjectName("layout_BakeRangeSpinBoxes")
        self.spinBox_BakeStart = QtWidgets.QSpinBox(self.frame_BakeRangeSpinBoxes)
        self.spinBox_BakeStart.setAccelerated(True)
        self.spinBox_BakeStart.setMinimum(-16777215)
        self.spinBox_BakeStart.setMaximum(16777215)
        self.spinBox_BakeStart.setProperty("value", 1)
        self.spinBox_BakeStart.setObjectName("spinBox_BakeStart")
        self.layout_BakeRangeSpinBoxes.addWidget(self.spinBox_BakeStart)
        self.spinBox_BakeEnd = QtWidgets.QSpinBox(self.frame_BakeRangeSpinBoxes)
        self.spinBox_BakeEnd.setAccelerated(True)
        self.spinBox_BakeEnd.setMinimum(-16777215)
        self.spinBox_BakeEnd.setMaximum(16777215)
        self.spinBox_BakeEnd.setProperty("value", 24)
        self.spinBox_BakeEnd.setObjectName("spinBox_BakeEnd")
        self.layout_BakeRangeSpinBoxes.addWidget(self.spinBox_BakeEnd)
        self.layout_BakeRange.addWidget(self.frame_BakeRangeSpinBoxes)
        self.layout_Root.addWidget(self.frame_BakeRange)
        self.layout_centralWidget.addWidget(self.frame_Root)
        SpaceSwitcherWindow.setCentralWidget(self.centralWidget)

        self.retranslateUi(SpaceSwitcherWindow)
        QtCore.QMetaObject.connectSlotsByName(SpaceSwitcherWindow)

    def retranslateUi(self, SpaceSwitcherWindow):
        """Translation hook kept for Qt Designer compatibility; all strings
        here are set directly in setupUi, so there is nothing to retranslate."""
        pass
class ControlMainWindow(QtWidgets.QMainWindow):
    """Main window wiring the generated SpaceSwitcher UI to spaceswitcher.core.

    Exposes query/edit helpers for the widgets plus slot callbacks that
    create and delete space-switching constraints inside a Maya undo chunk.
    """

    def __init__(self, window_title, parent=None):
        """Build the UI and connect every button to its slot callback.

        window_title -- title shown on (and used to identify) the window
        parent       -- optional parent widget, e.g. Maya's main window
        """
        super(ControlMainWindow, self).__init__(parent)
        self.window_title = window_title
        self.ui = Ui_SpaceSwitcherWindow()
        self.ui.setupUi(self)

        # signal - slot connections
        self.ui.pushButton_SetParent.clicked.connect(self.setParent_clicked)
        self.ui.pushButton_ClearParent.clicked.connect(self.clearParent_clicked)
        self.ui.pushButton_CreateConstraint.clicked.connect(self.createConstraint_clicked)
        self.ui.pushButton_CreateAndBakeConstraint.clicked.connect(self.createAndBakeConstraint_clicked)
        self.ui.pushButton_DeleteConstraint.clicked.connect(self.deleteConstraint_clicked)
        self.ui.pushButton_BakeAndDeleteConstraint.clicked.connect(self.bakeAndDeleteConstraint_clicked)
        self.ui.pushButton_SetFromTimeline.clicked.connect(self.setBakeRange_clicked)

    #
    # UI query methods
    #
    def get_parentname(self):
        """Return the parent node name currently entered in the UI."""
        return self.ui.lineEdit_Parent.text()

    def get_translate_switches(self):
        """Return (x, y, z) booleans for the translate constraint axes."""
        return (self.ui.checkBox_TranslateX.isChecked(),
                self.ui.checkBox_TranslateY.isChecked(),
                self.ui.checkBox_TranslateZ.isChecked())

    def get_rotate_switches(self):
        """Return (x, y, z) booleans for the rotate constraint axes."""
        return (self.ui.checkBox_RotateX.isChecked(),
                self.ui.checkBox_RotateY.isChecked(),
                self.ui.checkBox_RotateZ.isChecked())

    def get_bakestart(self):
        """Return the bake range start frame from the UI."""
        return self.ui.spinBox_BakeStart.value()

    def get_bakeend(self):
        """Return the bake range end frame from the UI."""
        return self.ui.spinBox_BakeEnd.value()

    #
    # UI edit methods
    #
    def set_parentname(self, name=None):
        """Set the parent name field.

        With no *name* (or name=None), the first currently selected node's
        name is used; if nothing is selected the field is left untouched.
        """
        _name = name
        if name is None:
            selections = ls(selection=True)
            if selections:
                _name = selections[0].name()
        if _name is not None:
            self.ui.lineEdit_Parent.setText(_name)

    def set_bakestart(self, value):
        """Set the bake start spin box; *value* is coerced to int because
        QSpinBox holds integers (playbackOptions returns floats)."""
        self.ui.spinBox_BakeStart.setValue(int(value))

    def set_bakeend(self, value):
        """Set the bake end spin box; *value* is coerced to int."""
        self.ui.spinBox_BakeEnd.setValue(int(value))

    #
    # UI update methods
    #
    def update_bakerange(self):
        """Sync the bake range spin boxes with the playback timeline."""
        self.set_bakestart(playbackOptions(q=1, minTime=True))
        self.set_bakeend(playbackOptions(q=1, maxTime=True))

    def update_all(self):
        """Refresh every UI element that mirrors scene state."""
        self.update_bakerange()

    #
    # internal helpers
    #
    def _parent_node(self):
        """Return the PyNode named in the parent field, or None if the name
        is empty or does not resolve to a node."""
        try:
            return PyNode(self.get_parentname())
        except Exception:
            # Was a bare ``except: pass``; narrowed so e.g. KeyboardInterrupt
            # is no longer swallowed.
            return None

    #
    # slot callback functions
    #
    @Slot()
    def setParent_clicked(self):
        """'Set' button: put the first selected node into the parent field."""
        self.set_parentname()

    # Backward-compatible alias for the historically misspelled slot name.
    setParent_cliciked = setParent_clicked

    @Slot()
    def clearParent_clicked(self):
        """'Clear' button: empty the parent field."""
        self.set_parentname(name='')

    @Slot()
    def createConstraint_clicked(self):
        """'Create' button: constrain the selection into the parent space."""
        undoInfo(openChunk=True)
        try:
            core.switch_space(None, self._parent_node(),
                              translate_switches=self.get_translate_switches(),
                              rotate_switches=self.get_rotate_switches())
        except Exception as err:
            print(str(err))
        finally:
            undoInfo(closeChunk=True)

    @Slot()
    def createAndBakeConstraint_clicked(self):
        """'Create and Bake' button: constrain and bake over the bake range."""
        undoInfo(openChunk=True)
        try:
            core.switch_space(None, self._parent_node(),
                              self.get_translate_switches(), self.get_rotate_switches(),
                              bake=True, start=self.get_bakestart(), end=self.get_bakeend())
        except Exception as err:
            print(str(err))
        finally:
            undoInfo(closeChunk=True)

    @Slot()
    def deleteConstraint_clicked(self):
        """'Delete' button: remove the selected space-switch constraints."""
        undoInfo(openChunk=True)
        try:
            core.delete_switch_space_constraints()
        except Exception as err:
            print(str(err))
        finally:
            undoInfo(closeChunk=True)

    @Slot()
    def bakeAndDeleteConstraint_clicked(self):
        """'Bake and Delete' button: bake the result, then remove constraints."""
        undoInfo(openChunk=True)
        try:
            core.delete_switch_space_constraints(bake=True, start=self.get_bakestart(), end=self.get_bakeend())
        except Exception as err:
            print(str(err))
        finally:
            undoInfo(closeChunk=True)

    @Slot()
    def setBakeRange_clicked(self):
        """'Set from timeline' button: copy the playback range into the UI."""
        self.update_bakerange()
def launch_ui(window_title='SpaceSwitcher'):
    """Show the SpaceSwitcher window, replacing any existing instance.

    If a window with the same object name and title already exists it is
    closed first, then a new ControlMainWindow is created, parented to
    Maya's main window, refreshed and shown.

    window_title -- title used both for display and for duplicate detection
    """
    existing_win_ptr = OpenMayaUI.MQtUtil.findWindow('SpaceSwitcherWindow')
    if existing_win_ptr:
        # int() handles the pointer value on both Python 2 and 3; the
        # Python-2-only ``long`` builtin used previously raises NameError
        # under Python 3 (Maya 2022+).
        existing_win = wrapInstance(int(existing_win_ptr), QtWidgets.QMainWindow)
        if existing_win and existing_win.windowTitle() == window_title:
            existing_win.close()

    main_win = ControlMainWindow(
        window_title,
        parent=wrapInstance(int(OpenMayaUI.MQtUtil.mainWindow()), QtWidgets.QWidget))
    main_win.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    main_win.setWindowTitle(window_title)
    main_win.update_all()
    main_win.show()
| 1.578125 | 2 |
test/check-gradient.sikuli/check-gradient.py | jsmaniac/travis-os | 0 | 12769326 | <filename>test/check-gradient.sikuli/check-gradient.py
# Sikuli screen test: wait up to 10 seconds for the reference gradient
# screenshot to appear anywhere on screen; raises FindFailed otherwise.
wait("1529963334209.png", 10)
| 0.789063 | 1 |
pypacker/interceptor.py | dschoonwinkel/pypacker | 0 | 12769327 | <reponame>dschoonwinkel/pypacker<filename>pypacker/interceptor.py<gh_stars>0
"""
Packet interceptor using NFQueue
Requirements:
- CPython
- NFQUEUE target support in Kernel
- iptables
"""
import ctypes
from ctypes import util as utils
import socket
from socket import ntohl
import os
import threading
import logging
logger = logging.getLogger("pypacker")

# Warning shown when the netfilter_queue shared library cannot be located or loaded.
MSG_NO_NFQUEUE = """Could not find netfilter_queue library. Make sure that...
- NFQUEUE target is supported by your Kernel:
	Networking Options
		Network packet filtering ...
			Core Netfilter ...
				NFQUEUE target
- iptables is installed and callable via "iptables"
- NFQUEUE related rulez can be added eg "iptables -I INPUT 1 -j NFQUEUE --queue-num 0"
"""

netfilter = None

try:
	# load library
	nflib = utils.find_library("netfilter_queue")

	if nflib is None:
		raise RuntimeError()
	netfilter = ctypes.cdll.LoadLibrary(nflib)
except (RuntimeError, OSError):
	# OSError: the library was located but ctypes failed to load it.
	# The original code only caught RuntimeError, so a load failure
	# escaped uncaught instead of producing the warning below.
	logger.warning(MSG_NO_NFQUEUE)

uid = os.getuid()

if uid != 0:
	# Interacting with netfilter queues requires root. SystemExit replaces
	# the previous print() + site-dependent exit() builtin.
	raise SystemExit("you must be root!")
class NfqQHandler(ctypes.Structure):
	# Forward declaration (opaque struct) so the structures below can embed
	# POINTER(NfqQHandler). NOTE(review): a second, field-carrying class with
	# the same name is defined further down in this module, rebinding the
	# module-level name — confirm that is intentional.
	pass
class NfnlHandle(ctypes.Structure):
	# Forward declaration for the nfnetlink handle; the C struct's fields are
	# not mirrored here because the handle is only ever passed around as an
	# opaque pointer returned by libnetfilter_queue.
	pass
# C callback signature used by nfnetlink callback entries:
# int (*call)(void *, void *, void *)
nfnl_callback_ctype = ctypes.CFUNCTYPE(
	ctypes.c_int, *(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
)


class NfnlCallback(ctypes.Structure):
	"""ctypes structure describing an nfnetlink callback entry."""
	# BUGFIX: the attribute was misspelled ``_fileds_``, which ctypes
	# silently ignores — the structure therefore had no fields at all.
	_fields_ = [("call", nfnl_callback_ctype),
			("data", ctypes.c_void_p),
			("attr_count", ctypes.c_uint16)
	]
class NfnlSubsysHandle(ctypes.Structure):
	# Handle for one nfnetlink subsystem: back pointer to the main handle,
	# subscription bitmask, subsystem id and its registered callbacks.
	_fields_ = [("nfilter_handler", ctypes.POINTER(NfnlHandle)),
			("subscriptions", ctypes.c_uint32),
			("subsys_id", ctypes.c_uint8),
			("cb_count", ctypes.c_uint8),
			("callback", ctypes.POINTER(NfnlCallback))
	]
class NfqHandle(ctypes.Structure):
	# Top-level netfilter-queue connection handle: the underlying nfnetlink
	# handle, the queue subsystem handle and the linked list of queues.
	_fields_ = [("nfnlh", ctypes.POINTER(NfnlHandle)),
			("nfnlssh", ctypes.POINTER(NfnlSubsysHandle)),
			("qh_list", ctypes.POINTER(NfqQHandler))
	]
class NfqQHandle(ctypes.Structure):
_fields_ = [("next", ctypes.POINTER(NfqQHandler)),
("h", ctypes.POINTER(NfqHandle)),
("id", ctypes.c_uint16),
("cb", ctypes.POINTER(NfnlHandle)),
("data", ctypes.c_void_p)
]
class NfqData(ctypes.Structure):
	# Opaque per-packet handle passed into queue callbacks; only the leading
	# data pointer is mirrored — all access goes through the nfq_get_* API.
	_fields_ = [("data", ctypes.POINTER(ctypes.c_void_p))]
class NfqnlMsgPacketHw(ctypes.Structure):
	# Hardware (link-layer) address attached to a queued packet:
	# used address length, 2 bytes of padding, then up to 8 address bytes.
	_fields_ = [("hw_addrlen", ctypes.c_uint16),
			("_pad", ctypes.c_uint16),
			#############################
			("hw_addr", ctypes.c_uint8 * 8)]
class NfqnlMsgPacketHdr(ctypes.Structure):
	# Metaheader of a queued packet: kernel-assigned packet id, link-layer
	# protocol and the netfilter hook it was caught on. The integer fields
	# arrive in network byte order (callers below convert with ntohl).
	_fields_ = [("packet_id", ctypes.c_uint32),
			("hw_protocol", ctypes.c_uint16),
			("hook", ctypes.c_uint8)
	]
class NfnlHandler(ctypes.Structure):
	# Field-carrying mirror of the nfnetlink handle (distinct from the
	# opaque forward-declared ``NfnlHandle`` above). Scalar members are
	# mirrored exactly; everything after the marker is collapsed to
	# untyped void pointers.
	_fields_ = [("fd", ctypes.c_int),
			("subscriptions", ctypes.c_uint32),
			("seq", ctypes.c_uint32),
			("dump", ctypes.c_uint32),
			("rcv_buffer_size", ctypes.c_uint32),
			#####################################
			("local", ctypes.c_void_p),
			("peer", ctypes.c_void_p),
			("last_nlhdr", ctypes.c_void_p),
			("subsys", ctypes.c_void_p)
	]
class NlifHandle(ctypes.Structure):
	# Partial mirror of libnfnetlink's interface-name resolution handle;
	# every member is collapsed to a void pointer because only the pointer
	# itself is passed through to the C API.
	_fields_ = [("ifindex_max", ctypes.c_void_p),
			("rtnl_handle", ctypes.c_void_p),
			("ifadd_handler", ctypes.c_void_p),
			("ifdel_handler", ctypes.c_void_p)
	]
class Timeval(ctypes.Structure):
	# Mirror of the C ``struct timeval`` filled in by get_timestamp() below.
	_fields_ = [("tv_sec", ctypes.c_long),
			("tv_usec", ctypes.c_long)]
# ---------------------------------------------------------------------------
# ctypes bindings for the libnetfilter_queue C API.
# Each binding sets restype/argtypes on the raw library symbol; the trailing
# commas on single-argument ``argtypes`` assignments build 1-tuples.
# NOTE(review): if the shared library failed to load above, ``netfilter`` is
# None and every attribute access below raises AttributeError at import time
# despite the earlier warning — confirm whether a hard failure is intended.
# ---------------------------------------------------------------------------

# Return netfilter netlink handler
nfnlh = netfilter.nfq_nfnlh
nfnlh.restype = ctypes.POINTER(NfnlHandle)
nfnlh.argtypes = ctypes.POINTER(NfqHandle),
# Return a file descriptor for the netlink connection associated with the
# given queue connection handle.
nfq_fd = netfilter.nfnl_fd
nfq_fd.restype = ctypes.c_int
nfq_fd.argtypes = ctypes.POINTER(NfnlHandle),
# This function obtains a netfilter queue connection handle
ll_open_queue = netfilter.nfq_open
ll_open_queue.restype = ctypes.POINTER(NfqHandle)
# Open a nfqueue handler from a existing nfnetlink handler.
# Not implemented in this wrapper.
#open_nfnl = netfilter.nfq_open_nfnl
#open_nfnl.restype = ctypes.POINTER(NfqHandle)
#open_nfnl.argtypes = ctypes.POINTER(NfnlHandle),
# This function closes the nfqueue handler and free associated resources.
close_queue = netfilter.nfq_close
close_queue.restype = ctypes.c_int
close_queue.argtypes = ctypes.POINTER(NfqHandle),
# Bind a nfqueue handler to a given protocol family.
bind_pf = netfilter.nfq_bind_pf
bind_pf.restype = ctypes.c_int
bind_pf.argtypes = ctypes.POINTER(NfqHandle), ctypes.c_uint16
# Unbind nfqueue handler from a protocol family.
unbind_pf = netfilter.nfq_unbind_pf
unbind_pf.restype = ctypes.c_int
unbind_pf.argtypes = ctypes.POINTER(NfqHandle), ctypes.c_uint16
# Creates a new queue handle, and returns it.
create_queue = netfilter.nfq_create_queue
create_queue.restype = ctypes.POINTER(NfqQHandler)
create_queue.argtypes = ctypes.POINTER(NfqHandle), ctypes.c_uint16, ctypes.c_void_p, ctypes.c_void_p
# Removes the binding for the specified queue handle.
destroy_queue = netfilter.nfq_destroy_queue
destroy_queue.restype = ctypes.c_int
destroy_queue.argtypes = ctypes.POINTER(NfqQHandler),
# Triggers an associated callback for the given packet received from the queue.
handle_packet = netfilter.nfq_handle_packet
handle_packet.restype = ctypes.c_int
handle_packet.argtypes = ctypes.POINTER(NfqHandle), ctypes.c_char_p, ctypes.c_int
# nfqnl_config_mode
NFQNL_COPY_NONE, NFQNL_COPY_META, NFQNL_COPY_PACKET = 0, 1, 2
# Sets the amount of data to be copied to userspace for each packet queued
# to the given queue.
#
# NFQNL_COPY_NONE - do not copy any data
# NFQNL_COPY_META - copy only packet metadata
# NFQNL_COPY_PACKET - copy entire packet
set_mode = netfilter.nfq_set_mode
set_mode.restype = ctypes.c_int
set_mode.argtypes = ctypes.POINTER(NfqQHandler), ctypes.c_uint8, ctypes.c_uint
# Sets the size of the queue in kernel. This fixes the maximum number
# of packets the kernel will store before internally before dropping
# upcoming packets.
set_queue_maxlen = netfilter.nfq_set_queue_maxlen
set_queue_maxlen.restype = ctypes.c_int
set_queue_maxlen.argtypes = ctypes.POINTER(NfqQHandler), ctypes.c_uint32
# Responses from hook functions.
NF_DROP, NF_ACCEPT, NF_STOLEN = 0, 1, 2
NF_QUEUE, NF_REPEAT, NF_STOP = 3, 4, 5
NF_MAX_VERDICT = NF_STOP
# Notifies netfilter of the userspace verdict for the given packet. Every
# queued packet _must_ have a verdict specified by userspace, either by
# calling this function, or by calling the nfq_set_verdict_mark() function.
# NF_DROP - Drop packet
# NF_ACCEPT - Accept packet
# NF_STOLEN - Don't continue to process the packet and not deallocate it.
# NF_QUEUE - Enqueue the packet
# NF_REPEAT - Handle the same packet
# NF_STOP -
# NF_MAX_VERDICT -
set_verdict = netfilter.nfq_set_verdict
set_verdict.restype = ctypes.c_int
set_verdict.argtypes = ctypes.POINTER(NfqQHandler), ctypes.c_uint32, ctypes.c_uint32,\
	ctypes.c_uint32, ctypes.c_char_p
# Like set_verdict, but you can set the mark.
set_verdict_mark = netfilter.nfq_set_verdict_mark
set_verdict_mark.restype = ctypes.c_int
set_verdict_mark.argtypes = ctypes.POINTER(NfqQHandler), ctypes.c_uint32, ctypes.c_uint32,\
	ctypes.c_uint32, ctypes.c_uint32, ctypes.c_char_p
# Return the metaheader that wraps the packet.
get_msg_packet_hdr = netfilter.nfq_get_msg_packet_hdr
get_msg_packet_hdr.restype = ctypes.POINTER(NfqnlMsgPacketHdr)
get_msg_packet_hdr.argtypes = ctypes.POINTER(NfqData),
# Return the netfilter mark currently assigned to the given queued packet.
get_nfmark = netfilter.nfq_get_nfmark
get_nfmark.restype = ctypes.c_uint32
get_nfmark.argtypes = ctypes.POINTER(NfqData),
# Retrieves the received timestamp when the given queued packet.
get_timestamp = netfilter.nfq_get_timestamp
get_timestamp.restype = ctypes.c_int
get_timestamp.argtypes = ctypes.POINTER(NfqData), ctypes.POINTER(Timeval)
# Return the index of the device the queued packet was received via. If the
# returned index is 0, the packet was locally generated or the input
# interface is not known.
get_indev = netfilter.nfq_get_indev
get_indev.restype = ctypes.c_uint32
get_indev.argtypes = ctypes.POINTER(NfqData),
# Return the index of the physical device the queued packet was received via.
# If the returned index is 0, the packet was locally generated or the
# physical input interface is no longer known.
get_physindev = netfilter.nfq_get_physindev
get_physindev.restype = ctypes.c_uint32
get_physindev.argtypes = ctypes.POINTER(NfqData),
# Return the index of the device the queued packet will be sent out.
get_outdev = netfilter.nfq_get_outdev
get_outdev.restype = ctypes.c_uint32
get_outdev.argtypes = ctypes.POINTER(NfqData),
# Return The index of physical interface that the packet output will be routed out
get_physoutdev = netfilter.nfq_get_physoutdev
get_physoutdev.restype = ctypes.c_uint32
get_physoutdev.argtypes = ctypes.POINTER(NfqData),
##################################################################
# Not implemented yet.
##################################################################
#get_indev_name = netfilter.nfq_get_indev_name
#get_indev_name.restype = ctypes.c_int
#get_indev_name.argtypes = ctypes.POINTER(NlifHandle), ctypes.POINTER(NfqData), ctypes.c_void_p
#def test_get_indev_name(nfa):
#	ptr_name = ctypes.c_void_p(0)
#	nlif = NlifHandle()
#	get_indev_name(ctypes.byref(nlif), nfa, ptr_name)
#	print(ptr_name)
#get_physindev_name = netfilter.nfq_get_physindev_name
#get_physindev_name.restype = ctypes.c_int
#get_physindev_name.argtypes = ctypes.POINTER(NlifHandle), ctypes.POINTER(NfqData), ctypes.c_char_p
########
#get_outdev_name = netfilter.nfq_get_outdev_name
#get_outdev_name.restype = ctypes.c_int
#get_outdev_name.argtypes = ctypes.POINTER(NlifHandle), ctypes.POINTER(NfqData), ctypes.c_char_p
########
#get_physoutdev_name = netfilter.nfq_get_physoutdev_name
#get_physoutdev_name.restype = ctypes.c_int
#get_physoutdev_name.argtypes = ctypes.POINTER(NlifHandle), ctypes.POINTER(NfqData), ctypes.c_char_p
########
# Retrieves the hardware address associated with the given queued packet.
get_packet_hw = netfilter.nfq_get_packet_hw
get_packet_hw.restype = ctypes.POINTER(NfqnlMsgPacketHw)
get_packet_hw.argtypes = ctypes.POINTER(NfqData),
# Retrieve the payload for a queued packet.
get_payload = netfilter.nfq_get_payload
get_payload.restype = ctypes.c_int
get_payload.argtypes = ctypes.POINTER(NfqData), ctypes.POINTER(ctypes.c_void_p)

# Python callback type handed to create_queue():
# void (*cb)(struct NfqQHandler *qh, struct nfgenmsg *nfmsg, struct NfqData *nfa, void *data)
HANDLER = ctypes.CFUNCTYPE(
	#(struct NfqQHandler *qh, struct nfgenmsg *nfmsg, struct NfqData *nfa, void *data)
	None, *(ctypes.POINTER(NfqQHandler), ctypes.c_void_p, ctypes.POINTER(NfqData), ctypes.c_void_p)
)
def open_queue():
    """Open a netfilter-queue handle, failing loudly if it cannot be opened."""
    queue_handle = ll_open_queue()
    assert queue_handle is not None, "can't open the queue"
    return queue_handle
def get_full_payload(nfa, ptr_packet):
    """Return (length, bytes) of the payload of the packet referenced by `nfa`."""
    length = get_payload(nfa, ctypes.byref(ptr_packet))
    payload = ctypes.string_at(ptr_packet, length)
    return length, payload
def get_full_msg_packet_hdr(nfa):
    """Return the nfqnl packet metaheader as a dict.

    In struct nfqnl_msg_packet_hdr, ``packet_id`` is a 32-bit and
    ``hw_protocol`` a 16-bit big-endian field, so they are converted
    with ntohl/ntohs respectively; ``hook`` is a plain byte.
    """
    pkg_hdr = get_msg_packet_hdr(nfa)
    return {"packet_id": ntohl(pkg_hdr.contents.packet_id),
            # BUG FIX: hw_protocol is a 16-bit field (__be16); converting it
            # with ntohl shifted the value into the high bytes on
            # little-endian hosts.  ntohs is the correct conversion.
            "hw_protocol": socket.ntohs(pkg_hdr.contents.hw_protocol),
            "hook": pkg_hdr.contents.hook}
def get_packet_id(nfa):
    """Return the queued packet's id, converted to host byte order."""
    header = get_msg_packet_hdr(nfa)
    return ntohl(header.contents.packet_id)
def set_pyverdict(queue_handle, packet_id, verdict, buffer_len, buffer):
    """Issue a verdict for `packet_id`, wrapping the payload bytes for ctypes."""
    payload = ctypes.c_char_p(buffer)
    set_verdict(queue_handle, packet_id, verdict, buffer_len, payload)
def get_pytimestamp(nfa):
    """Return the packet receive timestamp as a (seconds, microseconds) pair."""
    timestamp = Timeval()
    get_timestamp(nfa, ctypes.byref(timestamp))
    return timestamp.tv_sec, timestamp.tv_usec
class Interceptor(object):
    """
    Packet interceptor. Allows MITM and filtering.

    Packets queued via NFQUEUE are read on a background thread; each one is
    handed to the user-supplied verdict callback, and the (possibly
    modified) payload plus verdict are written back to the kernel.
    """

    def __init__(self, verdict_callback):
        """
        verdict_callback -- callback with this signature:
            callback(data): data, verdict
        """
        self._verdict_cb = verdict_callback
        #self._packet_creation_cb = packet_creation_callback
        self._packet_ptr = ctypes.c_void_p(0)
        ptr_packet = ctypes.c_void_p(0)

        def verdict_callback_ind(queue_handle, nfmsg, nfa, data):
            # Runs on the reader thread for every queued packet.
            packet_id = get_packet_id(nfa)
            len_recv, data = get_full_payload(nfa, ptr_packet)
            data_ret, verdict = self._verdict_cb(data)
            set_pyverdict(queue_handle, packet_id, verdict, len(data_ret), data_ret)

        # Keep a reference to the ctypes callback object so it is not
        # garbage-collected while the C library still holds a pointer to it.
        self._c_handler = HANDLER(verdict_callback_ind)
        self._queue = None
        self._nfq_handle = None
        self._socket = None
        self._is_running = False
        self._cycler_thread = None

    @staticmethod
    def verdict_cycler(obj):
        """Reader loop: pump bytes from the netlink socket into libnetfilter_queue."""
        # logger.debug("verdict_cycler starting")
        recv = obj._socket.recv
        nfq_handle = obj._nfq_handle
        try:
            while obj._is_running:
                bts = recv(65535)
                # logger.debug("got bytes: %r" % bts)
                handle_packet(nfq_handle, bts, 65535)
        except Exception as ex:
            # Best-effort loop: any read error (including socket shutdown
            # during stop()) just ends the cycle.
            # logger.warning("Exception while reading: %r", ex)
            pass
        finally:
            obj.stop()

    def start(self, queue_id=0):
        """Bind to NFQUEUE `queue_id` and start the background reader thread."""
        if self._is_running:
            return
        # logger.debug("starting interceptor")
        self._nfq_handle = open_queue()
        unbind_pf(self._nfq_handle, socket.AF_INET)
        bind_pf(self._nfq_handle, socket.AF_INET)
        self._queue = create_queue(self._nfq_handle, queue_id, self._c_handler, None)
        set_mode(self._queue, NFQNL_COPY_PACKET, 0xffff)
        nf = nfnlh(self._nfq_handle)
        fd = nfq_fd(nf)
        # fd,, family, sockettype
        self._socket = socket.fromfd(fd, 0, 0)
        self._cycler_thread = threading.Thread(
            target=Interceptor.verdict_cycler,
            args=[self])
        self._is_running = True
        self._cycler_thread.start()

    def stop(self):
        """Stop the reader loop and release the queue and the library handle."""
        if not self._is_running:
            return
        # logger.debug("stopping interceptor")
        self._is_running = False
        destroy_queue(self._queue)
        # BUG FIX: this previously read `self._nfqh`, an attribute that is
        # never assigned anywhere (the handle is stored in `_nfq_handle`),
        # so stop() raised AttributeError and leaked the library handle.
        close_queue(self._nfq_handle)
| 2.328125 | 2 |
antidot/connector/generic/decorators.py | antidot/html-connector | 3 | 12769328 | <filename>antidot/connector/generic/decorators.py
import logging
from typing import List, Union
from colorama import Fore, init
from fluidtopics.connector import Client, LoginAuthentication, RemoteClient
from antidot.connector.generic.external_source_id_does_not_exists_error import ExternalSourceIdDoesNotExistsError
init()
class ClientAuthentication:
    """Wrap a publication-producing function and push its output to clients.

    The wrapped function must return an iterable of publications; every
    configured client receives them via ``publish``, and the outcome of
    each upload is logged and colour-printed.
    """

    def __init__(self, function, client: Union[Client, List[Client]]):
        self.function = function
        # Normalize to a list so __call__ can always iterate.
        self.clients = client if isinstance(client, list) else [client]

    def __call__(self, *args, **kwargs):
        publications = self.function(*args, **kwargs)
        if not publications:
            print(Fore.YELLOW, "/!\\ We did not have any publications to publish !", Fore.RESET)
            return None
        for client in self.clients:
            response = client.publish(*publications)
            self._report(client, response)
        # Only the last client's response is returned.
        return response

    @staticmethod
    def _report(client, response):
        """Log and colour-print the outcome of a single publish call."""
        if response.status_code == 404 and client._sender.source_id in response.content.decode("utf8"):
            error_msg = str(ExternalSourceIdDoesNotExistsError(client))
            logging.critical(error_msg)
            print(Fore.RED, error_msg, Fore.RESET)
        elif response.status_code == 200:
            successful_msg = "Uploaded everything to {} successfully : {}.".format(client, response)
            logging.info(successful_msg)
            print(Fore.GREEN, successful_msg, Fore.RESET)
        else:
            error_msg = "Problem during handling of {} : {} ({})".format(
                client, response, response.content.decode("utf8")
            )
            logging.critical(error_msg)
            print(Fore.RED + error_msg, Fore.RESET)
class LoginAndPasswordAuthentication(ClientAuthentication):
    """ClientAuthentication variant that builds its remote client from credentials."""

    def __init__(self, function, url, login, password, source_id):
        authentication = LoginAuthentication(login, password)
        remote = RemoteClient(url=url, authentication=authentication, source_id=source_id)
        super(LoginAndPasswordAuthentication, self).__init__(function, remote)
| 2.234375 | 2 |
python/sys_context.py | robotlightsyou/test | 2 | 12769329 | <gh_stars>1-10
import sys
# Copy stdin to stdout line by line, counting the lines copied; the count
# is always reported on stderr, even if copying fails mid-stream.
lines = 0
try:
    with sys.stdout as output:
        with sys.stdin as input:
            for i in input:
                output.write(i)
                lines += 1
finally:
    # stdout/stdin were closed by the `with` blocks above, so the summary
    # must go to stderr.
    print('!!!!', lines, file=sys.stderr)
| 2.828125 | 3 |
ciri/modules/autotranslate.py | AmarnathCJD/Cirilla-Userbot | 0 | 12769330 | from google_translate_py import AsyncTranslator
from ciri import HelpStr
from ciri.utils import ciri_cmd, eor
@ciri_cmd(pattern="at(?: |$)(.*)")
async def _at(e):
payload = e.text.split(maxsplit=3)
if len(payload) == 3:
text = payload[2]
lang = payload[1]
else:
text = payload[1]
lang = "en"
tr = await AsyncTranslator().translate(text, "", lang)
await eor(e, tr) # eor(e, tr)
# Register this module's command in the global help index.
HelpStr.update(
    {
        "autotranslate": {
            "at": {
                "description": "Auto Translate text while typing",
                "usage": ".at <lang> <text>",
            }
        }
    }
)
| 2.90625 | 3 |
core/serializers.py | utkarshagarwal1312/potato-url | 0 | 12769331 | from rest_framework import serializers
from rest_framework.serializers import Serializer, ModelSerializer
from core.models import KeyUrlMap
class RedirectionSerializer(Serializer):
pass
class CreateMappingSerializer(ModelSerializer):
potato_url = serializers.SerializerMethodField(read_only=True)
class Meta:
model = KeyUrlMap
fields = ('url', 'potato_url')
@staticmethod
def get_potato_url(obj):
return obj.get_potato_url()
| 2.375 | 2 |
gui_utils/threading.py | Arseha/peakonly | 65 | 12769332 | <reponame>Arseha/peakonly
from PyQt5 import QtCore
class WorkerSignals(QtCore.QObject):
"""
Defines the signals available from a running worker thread.
Attributes
----------
finished : QtCore.pyqtSignal
No data
error : QtCore.pyqtSignal
`tuple` (exctype, value, traceback.format_exc() )
result : QtCore.pyqtSignal
`object` data returned from processing, anything
progress : QtCore.pyqtSignal
`int` indicating % progress
download_progress : QtCore.pyqtSignal
`int`, `int`, `int` used to show a count of blocks transferred,
a block size in bytes, the total size of the file
"""
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
operation = QtCore.pyqtSignal(str)
download_progress = QtCore.pyqtSignal(int, int, int)
class Worker(QtCore.QRunnable):
"""
Worker thread
Parameters
----------
function : callable
Any callable object
Attributes
----------
mode : str
A one of two 'all in one' of 'sequential'
model : nn.Module
an ANN model if mode is 'all in one' (optional)
classifier : nn.Module
an ANN model for classification (optional)
segmentator : nn.Module
an ANN model for segmentation (optional)
peak_minimum_points : int
minimum peak length in points
"""
def __init__(self, function, *args, download=False, multiple_process=False, **kwargs):
super(Worker, self).__init__()
self.function = function
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
# Add the callback to our kwargs
if not download:
self.kwargs['progress_callback'] = self.signals.progress
else:
self.kwargs['progress_callback'] = self.signals.download_progress
if multiple_process:
self.kwargs['operation_callback'] = self.signals.operation
@QtCore.pyqtSlot()
def run(self):
result = self.function(*self.args, **self.kwargs)
self.signals.result.emit(result) # return results
self.signals.finished.emit() # done
| 2.546875 | 3 |
semantic-segmentation/lib/mnist_models.py | bcrafton/icsrl-deep-learning | 1 | 12769333 | <reponame>bcrafton/icsrl-deep-learning
import tensorflow as tf
import keras
import numpy as np
from lib.Model import Model
from lib.Layer import Layer
from lib.ConvToFullyConnected import ConvToFullyConnected
from lib.FullyConnected import FullyConnected
from lib.Convolution import Convolution
from lib.MaxPool import MaxPool
from lib.Dropout import Dropout
from lib.FeedbackFC import FeedbackFC
from lib.FeedbackConv import FeedbackConv
from lib.Activation import Relu
from lib.Activation import Tanh
def mnist_conv(batch_size, dropout_rate, init='alexnet', sparse=0, bias=0.1):
l0 = Convolution(input_shape=[batch_size, 28, 28, 1], filter_sizes=[3, 3, 1, 32], init=init, bias=bias, name='conv1')
l1 = Relu()
l2 = FeedbackConv(size=[batch_size, 28, 28, 32], num_classes=10, sparse=sparse, name='conv1_fb')
l3 = Convolution(input_shape=[batch_size, 28, 28, 32], filter_sizes=[3, 3, 32, 64], init=init, bias=bias, name='conv2')
l4 = Relu()
l5 = MaxPool(size=[batch_size, 28, 28, 64], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME")
l6 = FeedbackConv(size=[batch_size, 14, 14, 64], num_classes=10, sparse=sparse, name='conv2_fb')
l7 = ConvToFullyConnected(input_shape=[14, 14, 64])
l8 = FullyConnected(input_shape=14*14*64, size=128, init=init, bias=bias, name='fc1')
l9 = Relu()
l10 = Dropout(rate=dropout_rate)
l11 = FeedbackFC(size=[14*14*64, 128], num_classes=10, sparse=sparse, name='fc1_fb')
l12 = FullyConnected(input_shape=128, size=10, init=init, bias=bias, name='fc2')
##############################################
layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12]
model = Model(layers=layers)
return model
def mnist_fc(batch_size, dropout_rate, init='alexnet', sparse=0, bias=0.1):
l0 = ConvToFullyConnected(input_shape=[28, 28, 1])
l1 = FullyConnected(input_shape=784, size=400, init=init, bias=bias, name='fc1')
l2 = Relu()
l3 = Dropout(rate=dropout_rate)
l4 = FeedbackFC(size=[784, 400], num_classes=10, sparse=sparse, name='fc1_fb')
l5 = FullyConnected(input_shape=400, size=10, init=init, bias=bias, name='fc2')
layers=[l0, l1, l2, l3, l4, l5]
model = Model(layers=layers)
return model
| 2.296875 | 2 |
Integrations/QRadar/QRadar_test.py | TBE-Comp/content | 7 | 12769334 | <filename>Integrations/QRadar/QRadar_test.py<gh_stars>1-10
import pytest
import demistomock as demisto
@pytest.fixture(autouse=True)
def init_tests(mocker):
mocker.patch.object(demisto, 'params', return_value={'server': 'www.qradar.com', 'token': '<PASSWORD>', 'proxy': True})
def test_enrich_offense_res_with_source_and_destination_address_normal(mocker):
"""
Given:
- Offense raw response was fetched successfully with source and destination addresses IDs
When
- I enrich the offense with source and destination addresses
Then
- The offense result will have the source and destination addresses
"""
import QRadar as qradar
# Given:
# - Offense raw response was fetched successfully with source and destination addresses IDs
mocker.patch.object(qradar, 'extract_source_and_destination_addresses_ids',
return_value=(SOURCE_ADDR_IDS_DICT, DEST_ADDR_IDS_DICT))
mocker.patch.object(qradar, 'enrich_source_addresses_dict')
mocker.patch.object(qradar, 'enrich_destination_addresses_dict')
# When
# - I enrich the offense with source and destination addresses
enriched_offense = qradar.enrich_offense_res_with_source_and_destination_address(OFFENSE_RAW_RESULT)
# Then
# - The offense result will have the source and destination addresses
assert enriched_offense[0]['source_address_ids'] == ENRICH_OFFENSES_ADDR_EXPECTED[0]['source_address_ids']
assert enriched_offense[0]['local_destination_address_ids'] == ENRICH_OFFENSES_ADDR_EXPECTED[0][
'local_destination_address_ids']
def test_enrich_offense_res_with_source_and_destination_address_exception(mocker):
"""
Given:
- Offense raw response was fetched successfully with source and destination addresses IDs
When
- I enrich the offense with source and destination addresses, but encounter an exception in the middle
Then
- The offense result will be the same as the raw offense response
"""
import QRadar as qradar
# Given:
# - Offense raw response was fetched successfully with source and destination addresses IDs
mocker.patch.object(qradar, 'extract_source_and_destination_addresses_ids',
return_value=(SOURCE_ADDR_IDS_DICT, DEST_ADDR_IDS_DICT))
# When
# - I enrich the offense with source and destination addresses, but encounter an exception in the middle
mocker.patch.object(qradar, 'enrich_source_addresses_dict', side_effect=Exception('Raised exception'))
# Then
# - The offense result will be the same as the raw offense response
assert qradar.enrich_offense_res_with_source_and_destination_address(OFFENSE_RAW_RESULT) == OFFENSE_RAW_RESULT
def test_get_reference_by_name(mocker):
"""
Given:
- There's a reference set with non-url safe chars
When
- I fetch reference by name
Then
- The rest API endpoint will be called with URL safe chars
"""
import QRadar as qradar
mocker.patch.object(qradar, 'send_request')
# Given:
# - There's a reference set with non-url safe chars
# When
# - I fetch reference by name
qradar.get_reference_by_name(NON_URL_SAFE_MSG)
# Then
# - The rest API endpoint will be called with URL safe chars
qradar.send_request.assert_called_with('GET', 'www.qradar.com/api/reference_data/sets/{}'.format(
NON_URL_SAFE_MSG_URL_ENCODED), REQUEST_HEADERS, params={})
def test_delete_reference_set(mocker):
"""
Given:
- There's a reference set with non-url safe chars
When
- I delete a reference by name
Then
- The rest API endpoint will be called with URL safe chars
"""
import QRadar as qradar
mocker.patch.object(qradar, 'send_request')
# Given:
# - There's a reference set with non-url safe chars
# When
# - I delete a reference by name
qradar.delete_reference_set(NON_URL_SAFE_MSG)
# Then
# - The rest API endpoint will be called with URL safe chars
qradar.send_request.assert_called_with('DELETE', 'www.qradar.com/api/reference_data/sets/{}'.format(
NON_URL_SAFE_MSG_URL_ENCODED))
def test_update_reference_set_value(mocker):
"""
Given:
- There's a reference set with non-url safe chars
When
- I fetch reference value by name
Then
- The rest API endpoint will be called with URL safe chars
"""
import QRadar as qradar
mocker.patch.object(qradar, 'send_request')
# Given:
# - There's a reference set with non-url safe chars
# When
# - I fetch reference value by name
qradar.update_reference_set_value(NON_URL_SAFE_MSG, 'value')
# Then
# - The rest API endpoint will be called with URL safe chars
qradar.send_request.assert_called_with('POST', 'www.qradar.com/api/reference_data/sets/{}'.format(
NON_URL_SAFE_MSG_URL_ENCODED), params={'name': NON_URL_SAFE_MSG, 'value': 'value'})
def test_delete_reference_set_value(mocker):
"""
Given:
- There's a reference set with non-url safe chars
When
- I delete a reference value by name
Then
- The rest API endpoint will be called with URL safe chars
"""
import QRadar as qradar
mocker.patch.object(qradar, 'send_request')
# Given:
# - There's a reference set with non-url safe chars
# When
# - I delete a reference value by name
qradar.delete_reference_set_value(NON_URL_SAFE_MSG, 'value')
# Then
# - The rest API endpoint will be called with URL safe chars
qradar.send_request.assert_called_with('DELETE', 'www.qradar.com/api/reference_data/sets/{}/value'.format(
NON_URL_SAFE_MSG_URL_ENCODED), params={'name': NON_URL_SAFE_MSG, 'value': 'value'})
""" CONSTANTS """
REQUEST_HEADERS = {'Content-Type': 'application/json', 'SEC': 'token'}
NON_URL_SAFE_MSG = 'non-safe/;/?:@=&"<>#%{}|\\^~[] `'
NON_URL_SAFE_MSG_URL_ENCODED = 'non-safe%2F%3B%2F%3F%3A%40%3D%26%22%3C%3E%23%25%7B%7D%7C%5C%5E%7E%5B%5D%20%60'
""" API RAW RESULTS """
OFFENSE_RAW_RESULT = [{
"assigned_to": "mocker",
"categories": [
"Unknown Potential Exploit Attack",
"Potential Web Exploit"
],
"category_count": 2,
"close_time": None,
"closing_reason_id": None,
"closing_user": None,
"credibility": 2,
"description": "Activacion",
"destination_networks": [
"mock_net"
],
"device_count": 2,
"domain_id": 27,
"event_count": 2,
"flow_count": 0,
"follow_up": False,
"id": 49473,
"inactive": False,
"last_updated_time": 1563433313767,
"local_destination_address_ids": [
1234412
],
"local_destination_count": 1,
"log_sources": [
{
"id": 115,
"name": "Custom Rule Engine",
"type_id": 18,
"type_name": "EventCRE"
},
{
"id": 2439,
"name": "FortiGate 02",
"type_id": 73,
"type_name": "FortiGate"
}
],
"magnitude": 4,
"offense_source": "192.168.0.1",
"offense_type": 0,
"policy_category_count": 0,
"protected": False,
"relevance": 4,
"remote_destination_count": 0,
"rules": [
{
"id": 166,
"type": "CRE_RULE"
}
],
"security_category_count": 2,
"severity": 6,
"source_address_ids": [
294626
],
"source_count": 1,
"source_network": "other",
"start_time": 1563433305606,
"status": "OPEN",
"username_count": 0
}]
""" FUNCTION MOCK RESULTS """
SOURCE_ADDR_IDS_DICT = {
294626: '192.168.0.1'
}
DEST_ADDR_IDS_DICT = {
1234412: '192.168.0.2'
}
ENRICH_OFFENSES_ADDR_EXPECTED = [
{'offense_source': '192.168.0.1', 'status': 'OPEN', 'remote_destination_count': 0, 'source_count': 1,
'description': 'Activacion', 'rules': [{'type': 'CRE_RULE', 'id': 166}], 'destination_networks': ['mock_net'],
'source_address_ids': ['192.168.0.1'], 'policy_category_count': 0, 'last_updated_time': 1563433313767,
'offense_type': 0, 'category_count': 2, 'inactive': False, 'security_category_count': 2, 'flow_count': 0,
'protected': False, 'domain_id': 27, 'categories': ['Unknown Potential Exploit Attack', 'Potential Web Exploit'],
'follow_up': False, 'close_time': None, 'start_time': 1563433305606, 'severity': 6, 'event_count': 2,
'credibility': 2, 'local_destination_count': 1, 'closing_reason_id': None, 'device_count': 2, 'id': 49473,
'username_count': 0, 'magnitude': 4, 'closing_user': None, 'source_network': 'other', 'assigned_to': 'mocker',
'relevance': 4, 'local_destination_address_ids': ['192.168.0.2'],
'log_sources': [{'type_name': 'EventCRE', 'type_id': 18, 'id': 115, 'name': 'Custom Rule Engine'},
{'type_name': 'FortiGate', 'type_id': 73, 'id': 2439, 'name': 'FortiGate 02'}]}]
| 2.234375 | 2 |
ros2service/ros2service/verb/find.py | ruffsl/ros2cli | 0 | 12769335 | # Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.node.strategy import NodeStrategy
from ros2service.api import get_service_names_and_types
from ros2service.verb import VerbExtension
from ros2srv.api import service_type_completer
class FindVerb(VerbExtension):
"""Output a list of available services of a given type."""
def add_arguments(self, parser, cli_name):
arg = parser.add_argument(
'service_type',
help='Name of the ROS service type to filter for '
"(e.g. 'rcl_interfaces/srv/ListParameters')")
arg.completer = service_type_completer
parser.add_argument(
'-c', '--count-services', action='store_true',
help='Only display the number of services discovered')
# duplicate the following argument from the command for visibility
parser.add_argument(
'--include-hidden-services', action='store_true',
help='Consider hidden services as well')
def main(self, *, args):
with NodeStrategy(args) as node:
service_names_and_types = get_service_names_and_types(
node=node,
include_hidden_services=args.include_hidden_services)
filtered_services = []
for (service_name, service_type) in service_names_and_types:
if args.service_type in service_type:
filtered_services.append(service_name)
if args.count_services:
print(len(filtered_services))
else:
for filtered_service in filtered_services:
print(filtered_service)
| 2.40625 | 2 |
pitool/web.py | calston/pitool | 0 | 12769336 | <reponame>calston/pitool<gh_stars>0
import json
import inspect
from twisted.web.template import renderer, XMLFile
from twisted.python.filepath import FilePath
from twisted.web.static import File
from twisted.web.template import tags
from .web_base import BaseResource, ContentElement, JSONResource
from . import pidata
class API(JSONResource):
    """JSON API that dispatches /api/<path> requests to call_* methods.

    Each ``call_<name>`` method is exposed as ``/api/<name>`` (with
    underscores mapped to path separators).
    """
    isLeaf = True

    def call_gpio(self, request):
        """Return the state of every GPIO pin on the board."""
        gpios = [g.asJson() for g in self.service.board.gpio.values()]
        return {
            'count': len(gpios),
            'pins': gpios
        }

    def call_gpio_mode(self, request):
        """Set pin <io> to input/output mode: /api/gpio/mode/<io>/<mode>."""
        io, mode = request.path.split('/')[-2:]
        io = int(io)
        if io in self.service.board.gpio:
            g = self.service.board.gpio[io]
            if mode == 'input':
                g.setInput()
            if mode == 'output':
                g.setOutput()
            return g.asJson()
        # Unknown pin: empty response rather than an error.
        return {}

    def call_gpio_set(self, request):
        """Drive pin <io> to <state>: /api/gpio/set/<io>/<state>."""
        io, state = request.path.split('/')[-2:]
        io = int(io)
        if io in self.service.board.gpio:
            g = self.service.board.gpio[io]
            g.set(int(state))
            return g.asJson()
        return {}

    def get(self, request):
        """Route the request path to the longest matching call_* method."""
        prefix = 'call_'
        # BUG FIX: this previously used n.lstrip('call_'), which strips any
        # run of characters from the set {c, a, l, _} rather than the
        # literal prefix (e.g. 'call_clear' -> 'ear').  Slice it off instead.
        members = dict([
            ('/api/' + n[len(prefix):].replace('_', '/'), m) for n, m in
            inspect.getmembers(self, predicate=inspect.ismethod)
            if n.startswith(prefix)
        ])
        # BUG FIX: dict.keys() returns a view on Python 3 and has no
        # .sort(); build a sorted list instead.  Longest paths are tried
        # first so the most specific route wins.
        keys = sorted(members.keys(), key=lambda n: len(n))
        for key in reversed(keys):
            if request.path.startswith(key):
                return members[key](request)
        return {'Err': 'No command'}
isLeaf = True
class Content(ContentElement):
loader = XMLFile(FilePath('pitool/resources/bitbanger.html'))
class Analyzer(BaseResource):
isLeaf = True
class Content(ContentElement):
loader = XMLFile(FilePath('pitool/resources/analyzer.html'))
class Index(BaseResource):
isLeaf = True
class Content(ContentElement):
loader = XMLFile(FilePath('pitool/resources/index.html'))
@renderer
def board(self, request, tag):
board = self.service.board
url = "/static/images/modules/%s.png" % board.board_code
return tag.fillSlots(brd_name=board.board_name, image=url, mem=str(board.memory))
| 1.96875 | 2 |
code/log_writer.py | WCU-EDGE/spark-dashboard | 0 | 12769337 | #This runs on each webserver and funnels the /var/log/auth contents to Kafka broker under topic XYZ
# This is a Kafka producer
from kafka import KafkaProducer
import time
import json
producer = KafkaProducer(bootstrap_servers='broker:9092',value_serializer=lambda v:json.dumps(v).encode('ascii'))
logfile = open('/var/log/auth.log', 'r')
lines = logfile.readlines()
for l in lines:
producer.send('auth.log',{'ws1':l})
logfile.seek(0,2)
while True:
line = logfile.readline()
if not line:
time.sleep(10)
continue
producer.send('auth.log',{'ws1':line})
| 2.296875 | 2 |
Experiments/cross_entropy_demo.py | Golden-Slumber/AirFL-2nd | 1 | 12769338 | import random
import matplotlib
import numpy
from matplotlib import pyplot as plt
from matplotlib.ticker import PercentFormatter
from Resources.data_loader import load_data
from Utils.CrossEntropy import CrossEntropySolver
from Algorithms.CrossEntropy.Solver.ACCADE_cross_entropy_solver import ACCADECrossEntropySolver
from Algorithms.CrossEntropy.Solver.DANE_cross_entropy_solver import DANECrossEntropySolver
from Algorithms.CrossEntropy.Solver.GIANT_cross_entropy_solver import GIANTCrossEntropySolver
from Algorithms.CrossEntropy.Solver.FedGD_cross_entropy_solver import FedGDCrossEntropySolver
from Algorithms.CrossEntropy.Solver.Fedsplit_cross_entropy_solver import FedSplitCrossEntropySolver
from keras.datasets import fashion_mnist
import sys
from constants import color_list, marker_list, GS_DCA, DCA_ONLY, GS_SDR, SDR_ONLY, PERFECT_AGGREGATION, \
first_order_list, second_order_list, GBMA, THRESHOLD, DC_FRAMEWORK
home_dir = '../'
sys.path.append(home_dir)
class CrossEntropyDemo(object):
def __init__(self, data_name, max_iter, repeat, gamma, sigma, p, m, distance_list, data_size_list):
self.data_name = data_name
self.max_iter = max_iter
self.repeat = repeat
self.gamma = gamma
self.sigma = sigma
self.p = p
self.m = m
self.distance_list = distance_list
self.data_size_list = data_size_list
self.n = None
self.d = None
self.x_train = None
self.x_test = None
self.y_train = None
self.y_test = None
self.w_opt = None
self.cond_num = None
self.num_class = None
self.shards = None
def fit(self, x_train, y_train, shards, x_test, y_test):
self.x_train = x_train
self.y_train = y_train
self.shards = shards
self.x_test = x_test
self.y_test = y_test
self.n, self.d = self.x_train.shape
print(self.x_train.shape)
print(self.y_train.shape)
self.num_class = numpy.max(self.y_train) + 1
file_name = home_dir + 'Resources/' + self.data_name + '_optimal.npz'
npz_file = numpy.load(file_name)
self.w_opt = npz_file['w_opt']
print(self.w_opt)
print(self.w_opt.shape)
def perform_training(self, tau_list, k_list, modes, is_search=True, newton_iter=100):
for r in range(self.repeat):
for i in range(len(k_list)):
for j in range(len(tau_list)):
print('repeat ' + str(r) + ' : k = ' + str(k_list[i]) + ' , tau = ' + str(tau_list[j]))
h_mat = numpy.random.randn(self.max_iter, k_list[i], self.m) / numpy.sqrt(
2) + 1j * numpy.random.randn(self.max_iter, k_list[i], self.m) / numpy.sqrt(2)
for device in range(self.m):
PL = (10 ** 2) * ((self.distance_list[device] / 1) ** (-3.76))
h_mat[:, :, device] = numpy.sqrt(PL) * h_mat[:, :, device]
solver = ACCADECrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test,
opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_ACCADE_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_GS-DCA.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = FedGDCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DC_FRAMEWORK,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_FedGD_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DC_FRAMEWORK.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = FedSplitCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=THRESHOLD,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_FedSplit_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_THRESHOLD.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = DANECrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_DANE_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DCA only.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
solver = GIANTCrossEntropySolver(m=self.m, h_mat=h_mat, tau=tau_list[j], p=self.p,
x_test=self.x_test, y_test=self.y_test, opt_mode=DCA_ONLY,
num_class=self.num_class)
solver.fit(self.x_train, self.y_train, self.data_size_list, self.shards)
err, acc = solver.train(self.gamma, self.w_opt, max_iter=self.max_iter, is_search=is_search,
newton_max_iter=newton_iter)
out_file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_GIANT_' + self.data_name + '_antenna_' + str(
k_list[i]) + '_tau_' + str(tau_list[j]) + '_repeat_' + str(r) + '_DCA only.npz'
numpy.savez(out_file_name, err=err, acc=acc, data_name=self.data_name)
del solver
def plot_results_versus_iteration(self, data_name, k, tau, modes, solvers, repeat, max_iter, legends):
err_mat = numpy.zeros((len(modes) + 1, repeat, max_iter))
acc_mat = numpy.zeros((len(modes) + 1, repeat, max_iter))
# centralized
for r in range(repeat):
file_name = home_dir + 'Outputs/centralized_training_demo/centralized_training_demo_' + data_name + '_repeat_' + str(
r) + '.npz'
npz_file = numpy.load(file_name)
err_mat[0][r] = npz_file['err']
acc_mat[0][r] = npz_file['acc']
for j in range(len(solvers)):
for r in range(repeat):
file_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_' + solvers[
j] + '_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '_repeat_' + str(r) + '_' + modes[j] + '.npz'
npz_file = numpy.load(file_name)
# print(npz_file['acc'])
# print(npz_file['err'])
err_mat[j+1][r] = npz_file['err']
acc_mat[j+1][r] = npz_file['acc']
fig = plt.figure(figsize=(9, 8))
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
line_list = []
for i in range(len(modes)+1):
line, = plt.semilogy(numpy.median(err_mat[i], axis=0), color=color_list[i], linestyle='-',
marker=marker_list[i],
markerfacecolor='none', ms=7, markeredgewidth=2.5, linewidth=2.5)
line_list.append(line)
plt.legend(line_list, legends, fontsize=20)
plt.xlabel('Communication Rounds', fontsize=20)
plt.ylabel('Training Loss', fontsize=20)
plt.xlim(0, max_iter - 1)
plt.ylim(0.25, 2.2)
plt.tight_layout()
plt.grid()
image_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_err_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '.pdf'
fig.savefig(image_name, format='pdf', dpi=1200)
plt.show()
fig = plt.figure(figsize=(9, 8))
line_list = []
for i in range(len(modes)+1):
line, = plt.plot(numpy.median(acc_mat[i], axis=0), color=color_list[i], linestyle='-',
marker=marker_list[i],
markerfacecolor='none', ms=7, markeredgewidth=2.5, linewidth=2.5, clip_on=False)
line_list.append(line)
plt.legend(line_list, legends, fontsize=20)
plt.xlabel('Communication Rounds', fontsize=20)
plt.ylabel('Test Accuracy', fontsize=20)
plt.xlim(0, max_iter - 1)
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
plt.tight_layout()
plt.grid()
image_name = home_dir + 'Outputs/cross_entropy_demo/cross_entropy_demo_acc_' + data_name + '_antenna_' + str(
k) + '_tau_' + str(tau) + '.pdf'
fig.savefig(image_name, format='pdf', dpi=1200)
plt.show()
def normalization(x_train, x_test):
    """Standardize the train and test arrays, each with its own statistics.

    Returns (normalized_train, normalized_test), where each array is
    centered by its mean and scaled by its (population) standard deviation.

    NOTE(review): the test set is normalized with its *own* mean/std rather
    than the training statistics — confirm this is intentional.
    """
    def _standardize(arr):
        center = numpy.mean(arr)
        spread = numpy.sqrt(numpy.var(arr))
        return numpy.divide(numpy.subtract(arr, center), spread)

    return _standardize(x_train), _standardize(x_test)
if __name__ == '__main__':
    # Experiment hyper-parameters for the federated cross-entropy demo.
    max_iter = 25
    repeat = 5
    gamma = 1e-8
    sigma = 1
    tau = numpy.sqrt(10)
    k = 5
    p = 1
    m = 10  # number of partitions/clients (sizes distance_list, data_size_list, shards)
    is_search = True
    newton_iter = 50
    datasets = ['fashion_mnist']
    tau_list = [1e-9]
    k_list = [5]
    # modes = [GS_DCA, PERFECT_AGGREGATION, DC_FRAMEWORK, THRESHOLD, DCA_ONLY, DCA_ONLY]
    # solvers = ['ACCADE', 'ACCADE', 'FedGD', 'FedSplit', 'GIANT', 'DANE']
    # legends = ['Proposed Algorithm', 'Baseline 0', 'Baseline 1', 'Baseline 2', 'Baseline 3', 'Baseline 4']
    modes = [GS_DCA, DC_FRAMEWORK, THRESHOLD, DCA_ONLY, DCA_ONLY]
    solvers = ['ACCADE', 'FedGD', 'FedSplit', 'GIANT', 'DANE']
    # NOTE(review): legends has 6 entries while modes/solvers have 5; the plot
    # helper draws len(modes)+1 lines, so the counts match, but confirm the
    # first legend entry pairs with the extra externally-loaded curve.
    legends = ['Baseline 0', 'Proposed Algorithm', 'Baseline 1', 'Baseline 2', 'Baseline 3', 'Baseline 4']
    for data_name in datasets:
        (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
        train_n = x_train.shape[0]
        test_n = x_test.shape[0]
        print(x_train.shape)
        print(y_test.shape)
        # Flatten 28x28 images into 784-dim feature vectors.
        x_train = x_train.reshape(train_n, 28 * 28)
        # Sorting by label makes consecutive shards label-homogeneous (non-iid split).
        idx = numpy.argsort(y_train)
        # idx = numpy.random.permutation(train_n)
        y_train = numpy.array(y_train).reshape(train_n, 1)
        x_test = x_test.reshape(test_n, 28 * 28)
        y_test = numpy.array(y_test).reshape(test_n, 1)
        x_train, x_test = normalization(x_train, x_test)
        # non-iid data distribution construction
        # print(idx)
        x_train = x_train[idx]
        y_train = y_train[idx]
        # Split the label-sorted data into 6*m equal shards and deal each
        # client 6 randomly chosen shards.
        shard_size = train_n // (6 * m)
        sub_shards = [range(i, i + shard_size) for i in range(0, 6 * shard_size * m, shard_size)]
        shard_ls = random.sample(range(6 * m), k=6 * m)
        # first_shards = [sub_shards[shard_ls[i]] for i in range(0, 2 * m, 2)]
        # second_shards = [sub_shards[shard_ls[i + 1]] for i in range(0, 2 * m, 2)]
        # shards = [list(sub_shards[shard_ls[i]]) + list(sub_shards[shard_ls[i+1]]) for i in range(0, 2 * m, 2)]
        shards = [list(sub_shards[shard_ls[i]]) + list(sub_shards[shard_ls[i + 1]]) + list(
            sub_shards[shard_ls[i + 2]]) + list(sub_shards[shard_ls[i + 3]]) + list(sub_shards[shard_ls[i + 4]]) + list(
            sub_shards[shard_ls[i + 5]]) for i
            in range(0, 6 * m, 6)]
        # print(shards[0])
        # heterogeneity construction for data size and distance
        distance_list = numpy.random.randint(100, 120, size=m)
        # distance_list[0: int(m / 10)] = numpy.random.randint(5, 10, size=int(m / 10))
        # distance_list[int(m / 10):] = numpy.random.randint(100, 120, size=9 * int(m / 10))
        perm = numpy.random.permutation(m)
        distance_list = distance_list[perm]
        # print(distance_list)
        # All clients currently get the same data size (6 shards each).
        data_size_list = numpy.zeros(m, dtype=int)
        data_size_list[0:m] = 6 * shard_size
        # data_size_list[0: int(m / 10)] = numpy.random.randint(int(0.08 * s), int(0.1 * s + 1), size=int(m / 10))
        # data_size_list[int(m / 10):] = numpy.random.randint(int(1 * s), int(1.1 * s + 1), size=9 * int(m / 10))
        perm = numpy.random.permutation(m)
        data_size_list = data_size_list[perm]
        # Train, then plot loss/accuracy curves for each (k, tau) combination.
        demo = CrossEntropyDemo(data_name, max_iter, repeat, gamma, sigma, p, m, distance_list, data_size_list)
        demo.fit(x_train, y_train, shards, x_test, y_test)
        demo.perform_training(tau_list, k_list, modes, is_search=is_search, newton_iter=newton_iter)
        for k in k_list:
            for tau in tau_list:
                demo.plot_results_versus_iteration(data_name, k, tau, modes, solvers, repeat, max_iter + 1, legends)
| 2.265625 | 2 |
editor/lib/juma/core/dispatch/signals.py | RazielSun/juma-editor | 7 | 12769339 | # -*- coding: utf-8 -*-
__author__ = '<NAME>'
import threading
#import logging
#logger = logging.getLogger(__name__)
import idle_queue
from weak_ref import weak_ref
class Signal:
    """A named signal holding weakly-referenced callbacks.

    ``emit`` schedules each live callback on the idle queue (deferred
    execution); ``emitNow`` invokes them synchronously. Callbacks are stored
    as weak references so connecting does not keep their owners alive, and
    dead references are pruned lazily during emission.
    """

    def __init__(self, name):
        self.name = name
        self.callbacks = []
        self.lock = threading.Lock()

    def __call__(self, *args, **kwargs):
        """Calling the signal is shorthand for emit()."""
        self.emit(*args, **kwargs)

    def connect(self, callback):
        """Register a callback; it is stored as a weak reference."""
        with self.lock:
            self.callbacks.append(weak_ref(callback))

    def disconnect(self, callback):
        """Remove the first registered reference that resolves to callback."""
        with self.lock:
            for index, ref in enumerate(self.callbacks):
                if callback == ref():
                    del self.callbacks[index]
                    break

    def emitNow(self, *args, **kwargs):
        """Invoke all live callbacks synchronously.

        NOTE(review): like the original implementation this does not take
        the lock, so concurrent connect/disconnect during a synchronous
        emit is racy — confirm whether that is acceptable for callers.
        """
        # Iterate over a snapshot: the original removed dead references from
        # self.callbacks *while* iterating it, which silently skips the
        # callback immediately following each removed entry.
        for ref in list(self.callbacks):
            callback = ref()
            if callback is not None:
                callback(*args, **kwargs)
            else:  # lost reference
                self.callbacks.remove(ref)

    def emit(self, *args, **kwargs):
        """Schedule all live callbacks on the idle queue."""
        with self.lock:
            # Snapshot for the same remove-while-iterating reason as emitNow.
            for ref in list(self.callbacks):
                callback = ref()
                if callback is not None:
                    idle_queue.idle_add(callback, *args, **kwargs)
                else:  # lost reference
                    self.callbacks.remove(ref)
#logger.debug("No signals assosiated to: {}".format(self.name)) | 2.3125 | 2 |
tests/test_api.py | inspired-co/eaas_client | 0 | 12769340 | # %%
import os
import unittest
from pathlib import Path
import jsonlines
from eaas import Client, Config
curr_dir = Path(__file__).parent
def read_jsonlines_to_list(file_name):
    """Load a JSON-lines file and return its records as a list."""
    with jsonlines.open(file_name, "r") as reader:
        return [record for record in reader]
class TestMetrics(unittest.TestCase):
    """Smoke tests that score sample inputs through the EaaS client.

    These tests call the remote scoring service, so they need network
    access; results are printed rather than asserted.
    """

    def test_api(self):
        client = Client(Config())
        path = os.path.join(curr_dir, "inputs", "multi_references.jsonl")
        outputs = client.score(read_jsonlines_to_list(path),
                               metrics=["bleu", "rouge2"])
        print(outputs)

    def test_multilingual(self):
        client = Client(Config())
        # Both the English and non-English branches of the original ended up
        # requesting the same metric subset (the other metrics were all
        # commented out), so a single list suffices here.
        metrics = ["bleu", "rouge2"]
        for lang in ["en", "fr", "zh"]:
            print(f"****** LANG: {lang} ******")

            print("For single reference")
            path = os.path.join(curr_dir, "inputs",
                                f"{lang}_single_ref_tiny.jsonl")
            print(client.score(read_jsonlines_to_list(path), metrics=metrics))

            print("For multiple references")
            path = os.path.join(curr_dir, "inputs",
                                f"{lang}_multi_ref_tiny.jsonl")
            print(client.score(read_jsonlines_to_list(path), metrics=metrics))

    def test_main_example(self):
        client = Client(Config())
        inputs = [
            {
                "source": "Hello, my world",
                "references": ["Hello, world", "Hello my world"],
                "hypothesis": "Hi, my world",
            }
        ]
        print(client.score(inputs, metrics=["rouge1", "bleu", "chrf"]))
| 2.609375 | 3 |
seahub/avatar/management/commands/rebuild_avatars.py | odontomachus/seahub | 0 | 12769341 | # Copyright (c) 2012-2016 Seafile Ltd.
from django.core.management.base import BaseCommand
from seahub.avatar.models import Avatar
from seahub.avatar.settings import AUTO_GENERATE_AVATAR_SIZES
class Command(BaseCommand):
    """Management command that rebuilds every avatar's thumbnails."""

    help = ("Regenerates avatar thumbnails for the sizes specified in "
            "settings.AUTO_GENERATE_AVATAR_SIZES.")

    def handle(self, **options):
        """Regenerate each stored avatar at every auto-generated size."""
        for avatar in Avatar.objects.all():
            for size in AUTO_GENERATE_AVATAR_SIZES:
                # Parenthesized print works under both Python 2 and 3.
                print("Rebuilding Avatar id=%s at size %s." % (avatar.id, size))
                avatar.create_thumbnail(size)
| 2.40625 | 2 |
utils/test.py | JiatLn/yhshow-be | 0 | 12769342 | <gh_stars>0
import datetime
from load_yuhun_json import load_json_file
import algo

# Ad-hoc benchmark: build a YuhunComb from the sample data and time pipeline().
yuhun_list = load_json_file('yuhun.json')[1:]  # skip the leading entry, as before

# Search constraints (property names are the game's Chinese labels).
l2_prop_limit = ['攻击加成']
l4_prop_limit = ['攻击加成']
l6_prop_limit = ['暴击', '暴击伤害']
optimize = '输出伤害'
limit_props = {
    '速度': {'min': 156, 'max': 158},
    '暴击': {'min': 1, 'max': float('inf')},
}
limit_pane = {'输出伤害': {'min': 17800, 'max': float('inf')}}
plan = {'4': '狂骨', '2': '荒骷髅'}
shishen_pane = {
    '攻击': 3511,
    '生命': 388,
    '防御': 388,
    '速度': 115,
    '暴击': 0.12,
    '暴击伤害': 1.6,
    '效果命中': 0,
    '效果抵抗': 0,
}

yuhun = algo.YuhunComb(
    yuhun_list, l2_prop_limit, l4_prop_limit, l6_prop_limit,
    optimize, limit_props, limit_pane, plan, shishen_pane)

# Measure wall-clock time of the full pipeline run.
started_at = datetime.datetime.now()
print('================start================')
result = yuhun.pipeline()
print(result)
finished_at = datetime.datetime.now()
print('================end================')
print(finished_at - started_at)
| 2.21875 | 2 |
exportdoc.py | mbebenita/wast | 0 | 12769343 | #!/usr/bin/env python
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, re
def read_yy():
    """Return the grammar-rules section of was_parser.yy with C actions stripped.

    The rules live between the two "%%" markers; each rule's inline
    "{ ... }" action block is removed.
    """
    with open('was_parser.yy', 'r') as f:
        source = f.read()
    section = re.search(r"%%\n([\s\S]*)%%", source, re.M)
    # Strip the inline C action blocks attached to each rule.
    return re.sub(r"\s+{\s[^}]*[^\n]*", "", section.group(1))
def read_l():
with open('was_lexer.l', 'r') as f:
read_data = f.read()
remove_c_code = re.compile(r"%\{((?!%\})[\s\S])*%\}", re.M);
remove_c_header = re.compile(r"/\*((?!\*/)[\s\S])*\*/\s*", re.M);
no_code = re.sub(remove_c_code, "", re.sub(remove_c_header, "", read_data));
remove_options = re.compile(r"^%\w[^\n]*\n", re.M);
no_options = re.sub(remove_options, "", no_code);
lexer_content = re.compile(r"\n*([\s\S]*)%%\n([\s\S]*)%%", re.M);
m = lexer_content.search(no_options)
sequences = m.group(1)
tokens = m.group(2)
simplify_tokens = re.compile(r"(\s+)\{.*?return\s+token::([^;]+);\s+\}", re.M)
simplified_tokens = re.sub(simplify_tokens, r"\1\2", tokens)
removed_trivial = re.sub(r"\n\x22([^\x22]+)\x22\s+\{.*?return\('\1'\)[^\n]+", "",simplified_tokens)
removed_stats = re.sub(r"(\s+)\{\s+BEGIN\(([^\)]+)\);\s+\}", r"\1STATE:\2", removed_trivial)
removed_code = re.sub(r"(\s+)\{[^\}]+\}[^\n]*", "", removed_stats);
return sequences + removed_code
# Emit the combined grammar/lexer documentation as Markdown on stdout.
# (Parenthesized single-argument print keeps Python 2 behavior unchanged.)
print("# Grammar Rules")
print("")
print(read_yy())
print("")
print("# Scanner/Lexer")
print("")
print(read_l())
print | 2.265625 | 2 |
image_classif/app_image_classif_evaluate.py | icalciu/apps | 0 | 12769344 | import os
import sys
import random
import subprocess
import time
import turicreate as tc

#### Evaluate the model
# Prerequisites: create data with app_image_classif_create.py and train
# the model with app_image_classif_model.py.
######################
tracker = 'sudo /home/icalciu/ccfpga/peaberry/src/pbsim-ptrace/tracker'
######################

tc.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 56)

pid = os.getpid()
print("[PB]: My pid is " + str(pid))

# Load the held-out test set and the trained model.
test_data = tc.SFrame('cats-dogs-test.sframe')
model = tc.load_model('mymodel.model')

# Evaluate, reporting elapsed wall-clock time and accuracy.
print("Evaluate model")
eval_started = time.time()
metrics = model.evaluate(test_data)
eval_finished = time.time()
print(eval_finished - eval_started)
print(metrics['accuracy'])
sys.stdout.flush()
| 2.28125 | 2 |
k0001sumo.py | MartijnHarmenzon/openTLC | 10 | 12769345 | <reponame>MartijnHarmenzon/openTLC
# import libraries SUMO/TraCI
from __future__ import absolute_import
from __future__ import print_function
import traci
# import libraries openTLC
from k0001func import initialise
from k0001app import open_tlc
from k0001sumofunc import set_sumo_inputs, set_state
# import libraries other
import optparse
import os
import subprocess
import sys
# import time
# import random
# the port used for communicating with your sumo instance
traci_port = 8873
# we need to import python modules from the $SUMO_HOME/tools directory
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary
except ImportError:
sys.exit(
"""please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation
(it should contain folders 'bin', 'tools' and 'docs')"""
)
#
def run():
    """Connect to SUMO over TraCI and step the simulation until it ends.

    Each step it calls set_sumo_inputs(), open_tlc(step) and set_state(),
    in that order, then advances the step counter.
    """
    traci.init(traci_port)
    initialise()
    step = 0
    while traci.simulation.getMinExpectedNumber() > 0:
        traci.simulationStep()
        set_sumo_inputs()
        open_tlc(step)
        set_state()
        step += 1
    traci.close()
    sys.stdout.flush()
#
def get_options():
    """Parse command-line flags; --nogui selects the headless sumo binary."""
    parser = optparse.OptionParser()
    parser.add_option("--nogui", action="store_true", default=False,
                      help="run the commandline version of sumo")
    options, _args = parser.parse_args()
    return options
# this is the main entry point of this script
# this is the main entry point of this script
if __name__ == "__main__":
    # Called from the command line: start sumo as a TraCI server, connect,
    # and run the control loop.
    sumo_binary = checkBinary('sumo' if get_options().nogui else 'sumo-gui')
    # sumo runs as a subprocess; this script talks to it on traci_port.
    sumo_process = subprocess.Popen(
        [sumo_binary, "-c", "SUMO/k0001.sumocfg", "--remote-port", str(traci_port)],
        stdout=sys.stdout, stderr=sys.stderr)
    run()
    sumo_process.wait()
| 2.03125 | 2 |
peek/api/authentication.py | and3rson/notez | 1 | 12769346 | <reponame>and3rson/notez<gh_stars>1-10
from rest_framework.authentication import SessionAuthentication
class CSRFExemptSessionAuthentication(SessionAuthentication):
    """DRF session authentication with the CSRF check disabled."""

    def enforce_csrf(self, request):  # pragma: no cover
        # Intentionally a no-op: skip the CSRF enforcement that
        # SessionAuthentication would otherwise perform here.
        return  # To not perform the CSRF check previously happening
| 1.75 | 2 |
Examples/example25.py | ZibraMax/FEM | 10 | 12769347 | from FEM.Mesh.Geometry import Geometry
from FEM.Mesh.Delaunay import Delaunay
from FEM.PlaneStrain import PlaneStrain
from FEM.Utils.polygonal import roundCorner, giveCoordsCircle
import matplotlib.pyplot as plt
import numpy as np
# Problem parameters (Spanish identifiers: "ancho" = width).
E = 30*10**(5)  # material constant passed to PlaneStrain — presumably Young's modulus, confirm
v = 0.25  # material constant passed to PlaneStrain — presumably Poisson's ratio, confirm
b = 10  # half-width of the domain (the outline spans x in [0, 2*b])
h = 20  # total height of the domain
he = h/4  # element/height step; 4*he == h
ancho_en_h10_in = 18  # inner width at height 10, used to fit the side parabola
ancho_en_h20_in = 10  # inner width at height 20, used to fit the side parabola
p0 = 200  # magnitude of the distributed load applied on the top segment
pp = 1000  # point-load magnitude, split 3-4-5 into components below
ppx = pp*3/5
ppy = -pp*4/5
def darPolinomio(X, Y):
    """Return a callable evaluating the interpolating polynomial through (X, Y).

    Solves the Vandermonde system A c = Y, where A[i][j] = X[i]**j, and
    returns f(x) = sum_j c_j * x**j.
    """
    n = len(X)
    # Vandermonde matrix of the sample abscissae.
    vandermonde = np.array([[X[i] ** j for j in range(n)] for i in range(n)],
                           dtype=float)
    rhs = np.array(Y, dtype=float).reshape(n, 1)
    coeffs = np.linalg.solve(vandermonde, rhs)

    def f(x):
        return sum(coeffs[j, 0] * x ** j for j in range(n))

    return f
# Build the tunnel cross-section outline: a rectangle base whose two sides
# follow the fitted parabola, sampled at n points per side.
n = 20
parabola = darPolinomio(np.array([0, 10, 20]), np.array(
    [0, b-ancho_en_h10_in/2, b-ancho_en_h20_in/2]))
c = [
    [0, 0],
    [2*b, 0]]
# Right side, bottom to top (mirrored parabola).
for i in range(1, n):
    x = 2*b-parabola(h/n*i)
    y = h/n*i
    c += [[x, y]]
# Top edge endpoints at height 4*he (== h).
c += [[2*b-parabola(4*he), 4*he],
      [parabola(4*he), 4*he]]
# Left side, top back down to bottom.
for i in reversed(range(1, n)):
    x = parabola(h/n*i)
    y = h/n*i
    c += [[x, y]]
# Circular hole of radius 2 centered mid-domain.
holes = []
radi = 2
cent = [b, h/2]
vert, seg = giveCoordsCircle(cent, radi, n=50)
hole = {'center': cent, 'segments': seg, 'vertices': vert}
holes += [hole]
# Triangulate, pin the bottom segment (both DOFs), and save/show the mesh.
params = Delaunay._strdelaunay(constrained=True, delaunay=True, a='0.1', o=2)
geometria = Delaunay(c, params, nvn=2, holes_dict=holes)
geometria.generateSegmentsFromCoords([0, 0], [2*b, 0])
geometria.generateSegmentsFromCoords(
    [2*b-parabola(4*he), 4*he], [parabola(4*he), 4*he])
geometria.cbe = geometria.cbFromSegment(-2, 0, 1)
geometria.cbe += geometria.cbFromSegment(-2, 0, 2)
geometria.saveMesh('Mesh_tests/tunel')
geometria.show()
plt.show()
# Apply a downward distributed load p0 on the last segment and solve the
# plane-strain problem.
geometria.loadOnSegment(-1, fy=lambda s: -p0)
geometria.mask = None
O = PlaneStrain(geometria, E, v)
O.elementMatrices()
O.ensembling()
O.borderConditions()
O.solveES()
O.postProcess()
plt.show()
| 2.546875 | 3 |
neuroir/inputters/multitask/vector.py | niazangels/context_attentive_ir | 77 | 12769348 | <reponame>niazangels/context_attentive_ir
# Adapted from https://github.com/facebookresearch/DrQA/blob/master/drqa/reader/vector.py
import torch
import random
import copy
def vectorize(session, model, shuffle=False):
    """Torchify a single session example.

    Builds zero-padded LongTensors for the session's queries (sources),
    the next-query targets (queries[1:]) and each query's candidate
    documents, and returns them in a dict together with the raw tokens
    and size metadata.

    :param session: session object with ``queries`` (each having
        ``tokens``, ``documents`` and ``vectorize``) — project type.
    :param model: provides ``src_dict``, ``tgt_dict`` and
        ``args.num_candidates``.
    :param shuffle: if True, shuffle each query's candidate documents
        (a deep copy is shuffled, so the session itself is untouched).
    """
    src_dict = model.src_dict
    tgt_dict = model.tgt_dict
    num_candidates = model.args.num_candidates

    session_len = len(session)
    # Padded sizes for this session.
    max_source_len = max([len(query) for query in session.queries])
    max_target_len = max([len(query) for query in session.queries[1:]])
    max_document_len = max([len(doc) for query in session.queries for doc in query.documents])

    source_tokens = [query.tokens for query in session.queries]  # 2d list
    target_tokens = [query.tokens for query in session.queries[1:]]  # 2d list

    source_words = torch.LongTensor(session_len, max_source_len).zero_()
    source_lens = torch.LongTensor(session_len).zero_()
    target_words = torch.LongTensor(session_len - 1, max_target_len).zero_()
    target_lens = torch.LongTensor(session_len - 1).zero_()
    target_seq = torch.LongTensor(session_len - 1, max_target_len).zero_()  # use only to compute loss
    document_words = torch.LongTensor(session_len, num_candidates, max_document_len).zero_()
    document_lens = torch.LongTensor(session_len, num_candidates).zero_()
    document_labels = torch.LongTensor(session_len, num_candidates).zero_()

    for i in range(session_len):
        query = session.queries[i]
        query_len = len(query.tokens)
        source_lens[i] = query_len
        source_words[i, :query_len].copy_(torch.LongTensor(
            query.vectorize(word_dict=src_dict)))

        # candidate document ranking
        candidates = copy.deepcopy(query.documents)
        assert len(candidates) == num_candidates
        if shuffle:
            random.shuffle(candidates)
        for cidx in range(num_candidates):
            cand = candidates[cidx]
            document_lens[i, cidx] = len(cand.tokens)
            document_labels[i, cidx] = cand.label
            document_words[i, cidx, :len(cand.tokens)].copy_(torch.LongTensor(
                cand.vectorize(word_dict=src_dict)))

        if i != session_len - 1:
            # next query suggestion: query i's target is query i+1.
            query = session.queries[i + 1]
            query_len = len(query.tokens)
            target_lens[i] = query_len
            # target_words is indexed with the *source* dict; target_seq
            # with the target dict (used only for the loss).
            target_words[i, :query_len].copy_(torch.LongTensor(
                query.vectorize(word_dict=src_dict)))
            target_seq[i, :query_len].copy_(torch.LongTensor(
                query.vectorize(word_dict=tgt_dict)))  # diff is which dict is used

    return {
        'id': session.id,
        'source_tokens': source_tokens,
        'source_words': source_words,
        'source_lens': source_lens,
        'target_tokens': target_tokens,
        'target_words': target_words,
        'target_lens': target_lens,
        'target_seq': target_seq,
        'max_source_len': max_source_len,
        'max_target_len': max_target_len,
        'session_len': session_len,
        'num_candidates': num_candidates,
        'document_words': document_words,  # 3d tensor
        'document_lens': document_lens,  # 2d tensor
        'document_labels': document_labels,  # 2d tensor
        'max_document_len': max_document_len
    }
def batchify(batch):
    """Collate a list of vectorized session examples into one batch.

    Every example's tensors are copied into batch-level tensors padded to
    the batch-wide maximum source/target/document lengths. All sessions
    in the batch must have the same session length.
    """
    batch_size = len(batch)
    max_src = max(ex['max_source_len'] for ex in batch)
    max_tgt = max(ex['max_target_len'] for ex in batch)
    max_doc = max(ex['max_document_len'] for ex in batch)
    session_len = batch[0]['session_len']
    num_candidates = batch[0]['num_candidates']

    # All the sessions must have the same length.
    assert len({ex['session_len'] for ex in batch}) == 1

    # --------- Batch-level tensors (zero padding) ---------
    source_lens = torch.zeros(batch_size, session_len, dtype=torch.long)
    source_words = torch.zeros(batch_size, session_len, max_src, dtype=torch.long)
    document_lens = torch.zeros(batch_size, session_len, num_candidates, dtype=torch.long)
    document_words = torch.zeros(batch_size, session_len, num_candidates, max_doc, dtype=torch.long)
    document_labels = torch.zeros(batch_size, session_len, num_candidates, dtype=torch.float)
    target_lens = torch.zeros(batch_size, session_len - 1, dtype=torch.long)
    target_words = torch.zeros(batch_size, session_len - 1, max_tgt, dtype=torch.long)
    target_seq = torch.zeros(batch_size, session_len - 1, max_tgt, dtype=torch.long)

    for i, ex in enumerate(batch):
        source_lens[i] = ex['source_lens']
        source_words[i, :, :ex['max_source_len']].copy_(ex['source_words'])
        document_lens[i] = ex['document_lens']
        # Labels are converted to float by the assignment.
        document_labels[i] = ex['document_labels']
        document_words[i, :, :, :ex['max_document_len']].copy_(ex['document_words'])
        target_lens[i] = ex['target_lens']
        target_words[i, :, :ex['max_target_len']].copy_(ex['target_words'])
        target_seq[i, :, :ex['max_target_len']].copy_(ex['target_seq'])

    return {
        'batch_size': batch_size,
        'ids': [ex['id'] for ex in batch],
        'source_tokens': [ex['source_tokens'] for ex in batch],
        'source_words': source_words,
        'source_lens': source_lens,
        'target_tokens': [ex['target_tokens'] for ex in batch],
        'target_words': target_words,
        'target_lens': target_lens,
        'target_seq': target_seq,
        'session_len': session_len,
        'document_words': document_words,
        'document_lens': document_lens,
        'document_labels': document_labels
    }
| 2.515625 | 3 |
hs_web_server/dev_settings.py | kjb4494/hs-web-server | 0 | 12769349 | <gh_stars>0
"""
Django settings for hs_web_server project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from .base_settings import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): '<KEY>' looks like a redacted placeholder — a real key must
# be supplied before this settings module is usable.
SECRET_KEY = '<KEY>'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Local SQLite database stored next to the project (dev-only).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
| 1.367188 | 1 |
trigger/acl/tools.py | jccardonar/trigger | 380 | 12769350 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Various tools for use in scripts or other modules. Heavy lifting from tools
that have matured over time have been moved into this module.
"""
__author__ = '<NAME>, <NAME>'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright 2010-2011, AOL Inc.'
from collections import defaultdict
import datetime
import IPy
import os
import re
import sys
import tempfile
from trigger.acl.parser import *
from trigger.conf import settings
# Defaults
DEBUG = False
DATE_FORMAT = "%Y-%m-%d"
DEFAULT_EXPIRE = 6 * 30 # 6 months
# Exports
__all__ = ('create_trigger_term', 'create_access', 'check_access', 'ACLScript',
'process_bulk_loads', 'get_bulk_acls', 'get_comment_matches',
'write_tmpacl', 'diff_files', 'worklog', 'insert_term_into_acl',
'create_new_acl')
# Functions
def create_trigger_term(source_ips=None,
                        dest_ips=None,
                        source_ports=None,
                        dest_ports=None,
                        protocols=None,
                        action=None,
                        name="generated_term"):
    """Constructs & returns a Term object from constituent parts.

    :param source_ips: iterable of source addresses (default: none)
    :param dest_ips: iterable of destination addresses (default: none)
    :param source_ports: iterable of source ports (default: none)
    :param dest_ports: iterable of destination ports (default: none)
    :param protocols: iterable of protocols (default: none)
    :param action: term action list (default: ['accept'])
    :param name: term name (default: "generated_term")

    The original signature used mutable default arguments ([] and
    ['accept']); the default action list was assigned to ``term.action``
    and could be mutated later (e.g. via ``setaction``), corrupting the
    shared default. ``None`` sentinels make each call independent.
    """
    term = Term()
    term.action = action if action is not None else ['accept']
    term.name = name
    fields = {
        'source-address': source_ips or [],
        'destination-address': dest_ips or [],
        'source-port': source_ports or [],
        'destination-port': dest_ports or [],
        'protocol': protocols or [],
    }
    for key, data in fields.items():
        for value in data:
            if key in term.match:
                term.match[key].append(value)
            else:
                term.match[key] = [value]
    return term
def check_access(terms_to_check, new_term, quiet=True, format='junos',
                 acl_name=None):
    """
    Determine whether access is permitted by a given ACL (list of terms).

    Tests a new term against a list of terms. Return True if access in new term
    is permitted, or False if not.

    Optionally displays the terms that apply and what edits are needed.

    :param terms_to_check:
        A list of Term objects to check

    :param new_term:
        The Term object used for the access test

    :param quiet:
        Toggle whether output is displayed

    :param format:
        The ACL format to use for output display

    :param acl_name:
        The ACL name to use for output display
    """
    permitted = None
    # Fields of new_term we match existing terms against (missing fields
    # become empty lists and make a term "complicated" below).
    matches = {
        'source-address': new_term.match.get('source-address',[]),
        'destination-address': new_term.match.get('destination-address',[]),
        'protocol': new_term.match.get('protocol',[]),
        'destination-port': new_term.match.get('destination-port',[]),
        'source-port': new_term.match.get('source-port',[]),
    }

    def _permitted_in_term(term, comment=' check_access: PERMITTED HERE'):
        """
        A little closure to re-use internally that returns a Boolean based
        on the given Term object's action.
        """
        action = term.action[0]
        if action == 'accept':
            is_permitted = True
            if not quiet:
                term.comments.append(Comment(comment))
        elif action in ('discard', 'reject'):
            is_permitted = False
            if not quiet:
                print '\n'.join(new_term.output(format, acl_name=acl_name))
        else:
            is_permitted = None
        return is_permitted

    for t in terms_to_check:
        hit = True
        complicated = False

        # Terms flagged "trigger: make discard" are treated as discards.
        for comment in t.comments:
            if 'trigger: make discard' in comment:
                t.setaction('discard') #.action[0] = 'discard'
                t.makediscard = True # set 'make discard' flag

        # A term is a "hit" when every one of its match fields is covered
        # by new_term; fields new_term doesn't specify make it "complicated".
        for k,v in t.match.iteritems():
            if k not in matches or not matches[k]:
                complicated = True
            else:
                for test in matches[k]:
                    if test not in v:
                        hit = False
                        break

        if hit and not t.inactive:
            # Simple access check. Elegant!
            if not complicated and permitted is None:
                permitted = _permitted_in_term(t)
            # Complicated checks should set hit=False unless you want
            # them to display and potentially confuse end-users
            # TODO (jathan): Factor this into a "better way"
            else:
                # Does the term have 'port' defined?
                if 'port' in t.match:
                    port_match = t.match.get('port')
                    match_fields = (matches['destination-port'], matches['source-port'])

                    # Iterate the fields, and then the ports for each field. If
                    # one of the port numbers is within port_match, check if
                    # the action permits/denies and set the permitted flag.
                    for field in match_fields:
                        for portnum in field:
                            if portnum in port_match:
                                permitted = _permitted_in_term(t)
                            else:
                                hit = False

                # Other complicated checks would go here...

            # If a complicated check happened and was not a hit, skip to the
            # next term
            if complicated and not hit:
                continue

            if not quiet:
                print '\n'.join(t.output(format, acl_name=acl_name))

    return permitted
def create_access(terms_to_check, new_term):
    """Break new_term into atomic single-value terms and test each of them.

    Every combination of protocol/source/source-port/destination/
    destination-port in new_term (defaulting to 'any' for unspecified
    fields) becomes its own Term; terms whose access is NOT already
    permitted by terms_to_check are returned as the list of terms that
    should be inserted.
    """
    def _field(key):
        return new_term.match.get(key, ['any'])

    missing = []
    for proto in _field('protocol'):
        for src in _field('source-address'):
            for sport in _field('source-port'):
                for dst in _field('destination-address'):
                    for dport in _field('destination-port'):
                        candidate = Term()
                        # 'any' means "leave the field unconstrained".
                        for key, value in (('protocol', proto),
                                           ('source-address', src),
                                           ('destination-address', dst),
                                           ('source-port', sport),
                                           ('destination-port', dport)):
                            if str(value) != 'any':
                                candidate.match[key] = [value]
                        if not check_access(terms_to_check, candidate):
                            missing.append(candidate)
    return missing
# note, following code is -not currently used-
def insert_term_into_acl(new_term, aclobj, debug=False):
    """
    Return a new ACL object with the new_term added in the proper place based
    on the aclobj. Intended to recursively append to an interim ACL object
    based on a list of Term objects.

    It's safe to assume that this function is incomplete pending better
    documentation and examples.

    :param new_term:
        The Term object to use for comparison against aclobj

    :param aclobj:
        The original ACL object to use for creation of new_acl

    Example::

        import copy
        # terms_to_be_added is a list of Term objects that is to be added in
        # the "right place" into new_acl based on the contents of aclobj
        original_acl = parse(open('acl.original'))
        new_acl = copy.deepcopy(original_acl) # Dupe the original
        for term in terms_to_be_added:
            new_acl = generate_new_acl(term, new_acl)
    """
    # Start an empty ACL carrying over the original's metadata/comments.
    new_acl = ACL() # ACL comes from trigger.acl.parser
    new_acl.policers = aclobj.policers
    new_acl.format = aclobj.format
    new_acl.name = aclobj.name
    already_added = False  # new_term is inserted at most once

    for c in aclobj.comments:
        new_acl.comments.append(c)

    # The following logic is almost identical to that of check_access() except
    # that it tracks already_added and knows how to handle insertion of terms
    # before or after Terms with an action of 'discard' or 'reject'.
    for t in aclobj.terms:
        hit = True
        complicated = False
        permitted = None
        for k, v in t.match.iteritems():
            if debug:
                print "generate_new_acl(): k,v==",k,"and",v
            if k == 'protocol' and k not in new_term.match:
                continue
            if k not in new_term.match:
                complicated = True
                continue
            else:
                for test in new_term.match[k]:
                    if test not in v:
                        hit = False
                        break

            if not hit and k in ('source-port', 'destination-port',
                                 'source-address', 'destination-address'):
                # Here is where it gets odd: If we have multiple IPs in this
                # new term, and one of them matches in a deny, we must set hit
                # to True.
                got_match = False
                if t.action[0] in ('discard', 'reject'):
                    for test in new_term.match[k]:
                        if test in v:
                            hit = True

        # Check whether access in new_term is permitted (a la check_access(),
        # track whether it's already been added into new_acl, and then add it
        # in the "right place".
        if hit and not t.inactive and already_added == False:
            if not complicated and permitted is None:
                # "trigger: make discard" terms accepting new accepts: insert
                # new_term *before* the flagged term.
                for comment in t.comments:
                    if 'trigger: make discard' in comment and \
                        new_term.action[0] == 'accept':
                        new_acl.terms.append(new_term)
                        already_added = True
                        permitted = True
                if t.action[0] in ('discard','reject') and \
                    new_term.action[0] in ('discard','reject'):
                    permitted = False
                elif t.action[0] in ('discard','reject'):
                    permitted = False
                    new_acl.terms.append(new_term)
                    already_added = True
                elif t.action[0] == 'accept' and \
                     new_term.action[0] in ('discard', 'reject'):
                    permitted = False
                    new_acl.terms.append(new_term)
                    already_added = True
                elif t.action[0] == 'accept' and \
                     new_term.action[0] == 'accept':
                    permitted = True
        if debug:
            print "PERMITTED?", permitted

        # Original term is always appended as we move on
        new_acl.terms.append(t)

    return new_acl
def create_new_acl(old_file, terms_to_be_added):
    """Given a list of Term objects call insert_term_into_acl() to determine
    what needs to be added in based on the contents of old_file. Returns a new
    ACL object (None if terms_to_be_added is empty, as before).
    """
    # Close the file handle instead of leaking it.
    with open(old_file) as f:
        current = parse(f)  # Start with the original ACL contents
    new_acl = None
    for new_term in terms_to_be_added:
        # Accumulate: feed the previous result back in so every term is
        # inserted. (The original passed the pristine aclobj on every
        # iteration, so only the last term's insertion survived — this
        # contradicts insert_term_into_acl()'s documented usage example.)
        current = insert_term_into_acl(new_term, current)
        new_acl = current
    return new_acl
def get_bulk_acls():
    """
    Returns a dict of acls with an applied count over settings.AUTOLOAD_BULK_THRESH
    """
    from trigger.netdevices import NetDevices
    counts = defaultdict(int)
    for device in NetDevices().all():
        for acl in device.acls:
            counts[acl] += 1
    # Keep only named ACLs applied on enough devices to count as "bulk".
    return dict((acl, count) for acl, count in counts.items()
                if count >= settings.AUTOLOAD_BULK_THRESH and acl != '')
def process_bulk_loads(work, max_hits=settings.BULK_MAX_HITS_DEFAULT, force_bulk=False):
    """
    Formerly "process --ones".

    Processes work dict and determines tuple of (prefix, site) for each device. Stores
    tuple as a dict key in prefix_hits. If prefix_hits[(prefix, site)] is greater than max_hits,
    remove all further matching devices from work dict.

    By default if a device has no acls flagged as bulk_acls, it is not removed from the work dict.

    Example:
        * Device 'foo1-xyz.example.com' returns ('foo', 'xyz') as tuple.
        * This is stored as prefix_hits[('foo', 'xyz')] = 1
        * All further devices matching that tuple increment the hits for that tuple
        * Any devices matching hit counter exceeds max_hits is removed from work dict

    You may override max_hits to increase the num. of devices on which to load a bulk acl.
    You may pass force_bulk=True to treat all loads as bulk loads.
    """
    # Device prefix/site extracted from node names like 'foo1-xyz...'.
    prefix_pat = re.compile(r'^([a-z]+)\d{0,2}-([a-z0-9]+)')
    prefix_hits = defaultdict(int)

    import trigger.acl.db as adb
    bulk_acls = adb.get_bulk_acls()
    nd = adb.get_netdevices()

    if DEBUG:
        print 'DEVLIST:', sorted(work)

    # Sort devices numerically
    for dev in sorted(work):
        if DEBUG: print 'Doing', dev

        #testacls = dev.bulk_acls
        #if force_bulk:
        #    testacls = dev.acls
        testacls = dev.acls if force_bulk else dev.bulk_acls

        for acl in testacls: #only look at each acl once, but look at all acls if bulk load forced
            if acl in work[dev]:
                #if acl in work[router]:
                if DEBUG: print 'Determining threshold for acl ', acl, ' on device ', dev, '\n'
                # Per-ACL threshold overrides from settings, if present.
                if acl in settings.BULK_MAX_HITS:
                    max_hits = settings.BULK_MAX_HITS[acl]

                try:
                    prefix_site = prefix_pat.findall(dev.nodeName)[0]
                except IndexError:
                    # Node name doesn't match the prefix pattern; skip it.
                    continue

                # Mark a hit for this tuple, and dump remaining matches
                prefix_hits[prefix_site] += 1

                if DEBUG: print prefix_site, prefix_hits[prefix_site]
                if prefix_hits[prefix_site] > max_hits:
                    msg = "Removing %s on %s from job queue: threshold of %d exceeded for " \
                          "'%s' devices in '%s'" % (acl, dev, max_hits, prefix_site[0], prefix_site[1])
                    print msg
                    if 'log' in globals():
                        log.msg(msg)

                    # Remove that acl from being loaded, but still load on that device
                    work[dev].remove(acl)
                    #work[router].remove(acl)

    #done with all the devices
    return work
def get_comment_matches(aclobj, requests):
    """Given an ACL object and a list of ticket numbers return a list of matching comments."""
    matched = set()
    for term in aclobj.terms:
        # A term matches when any of its comments mentions any requested ticket.
        if any(req in comment for comment in term.comments for req in requests):
            matched.add(term)
    return matched
def update_expirations(matches, numdays=DEFAULT_EXPIRE):
    """Update expiration dates on matching terms. This modifies mutable objects, so use cautiously.

    :param matches: iterable of Term objects whose comments may contain a
        YYYY-MM-DD expiration date to be pushed forward
    :param numdays: number of days to extend each found date by
    """
    print 'matching terms:', [term.name for term in matches]
    for term in matches:
        date = None
        for comment in term.comments:
            # Look for a YYYY-MM-DD date embedded in the comment text.
            try:
                date = re.search(r'(\d{4}\-\d\d\-\d\d)', comment.data).group()
            except AttributeError:
                # re.search() returned None -- no date in this comment.
                #print 'No date match in term: %s, comment: %s' % (term.name, comment)
                continue
            try:
                dstamp = datetime.datetime.strptime(date, DATE_FORMAT)
            except ValueError, err:
                # Matched the regex but isn't a valid calendar date; abort the
                # whole job so a human can repair the comment before re-running.
                print 'BAD DATE FOR THIS COMMENT:'
                print 'comment:', comment.data
                print 'bad date:', date
                print err
                print 'Fix the date and start the job again!'
                import sys
                sys.exit()
            # Push the expiration forward and rewrite the date string in place.
            new_date = dstamp + datetime.timedelta(days=numdays)
            #print 'Before:\n' + comment.data + '\n'
            print 'Updated date for term: %s' % term.name
            comment.data = comment.data.replace(date, datetime.datetime.strftime(new_date, DATE_FORMAT))
            #print 'After:\n' + comment.data
def write_tmpacl(acl, process_name='_tmpacl'):
    """Write a temporary file to disk from a Trigger acl.ACL object & return the filename.

    :param acl: ACL object whose ``output(acl.format, replace=True)`` lines
        are written one per line
    :param process_name: suffix appended to the temp filename for
        identification
    :returns: path of the temporary file (caller is responsible for removal)
    """
    # mkstemp (unlike the deprecated, race-prone mktemp) creates and opens
    # the file atomically, so no attacker can pre-create the predicted name.
    fd, tmpfile = tempfile.mkstemp(suffix=process_name)
    with os.fdopen(fd, 'w') as f:
        for x in acl.output(acl.format, replace=True):
            f.write(x)
            f.write('\n')
    return tmpfile
def diff_files(old, new):
    """Return a unified diff (``diff -Naur``) between two files as a string.

    :param old: path of the original file
    :param new: path of the new file
    :returns: the raw diff output; empty string when the files are identical
    """
    # FIX: close the pipe explicitly instead of leaking it until GC.
    pipe = os.popen('diff -Naur %s %s' % (old, new))
    try:
        return pipe.read()
    finally:
        pipe.close()
def worklog(title, diff, log_string='updated by express-gen'):
    """Save a diff to the ACL worklog

    :param title: ACL name; written as the entry header and passed to the
        ``acl -i`` command to insert it into the load queue
    :param diff: diff text appended to today's worklog file
    :param log_string: RCS checkin message for the worklog change
    """
    from time import strftime,localtime
    from trigger.utils.rcs import RCS
    # Worklog files are per-day: workdoc.YYYYMMDD under FIREWALL_DIR/workdocs
    date = strftime('%Y%m%d', localtime())
    file = os.path.join(settings.FIREWALL_DIR, 'workdocs', 'workdoc.' + date)
    rcs = RCS(file)
    if not os.path.isfile(file):
        # First entry of the day: create the worklog file and check it in.
        print 'Creating new worklog %s' % file
        f = open(file,"w")
        f.write("# vi:noai:\n\n")
        f.close()
        rcs.checkin('.')
    print 'inserting the diff into the worklog %s' % file
    rcs.lock_loop()
    fd = open(file,"a")
    fd.write('"%s"\n' % title)
    fd.write(diff)
    fd.close()
    print 'inserting %s into the load queue' % title
    rcs.checkin(log_string)
    # Use acl to insert into queue, should be replaced with API call
    os.spawnlp(os.P_WAIT, 'acl', 'acl', '-i', title)
# Classes
class ACLScript:
    """
    Interface to generating or modifying access-lists. Intended for use in
    creating command-line utilities using the ACL API.

    Collects source/destination hosts and ports, protocols, and term or
    between-comment selectors, then builds the argument list for (and
    optionally runs) the external ``acl_script`` command.
    """
    def __init__(self, acl=None, mode='insert', cmd='acl_script',
                 show_mods=True, no_worklog=False, no_changes=False):
        self.source_ips = []
        self.dest_ips = []
        self.protocol = []
        self.source_ports = []
        self.dest_ports = []
        self.modify_terms = []
        self.bcomments = []     # list of (begin, end) comment pairs
        self.tempfiles = []     # temp files created by genargs(); see cleanup()
        self.acl = acl
        self.cmd = cmd
        self.mode = mode        # 'insert' or 'replace'
        self.show_mods = show_mods
        self.no_worklog = no_worklog
        self.no_changes = no_changes

    def cleanup(self):
        """Remove any temporary address files created by genargs()."""
        for file in self.tempfiles:
            os.remove(file)

    def genargs(self,interactive=False):
        """Build and return the argument list for the acl_script command.

        Writes any collected source/destination addresses to temporary files
        (recorded in self.tempfiles for later cleanup()).

        :param interactive: if False, add --no-input to suppress prompts
        :raises RuntimeError: if no ACL has been set
        :raises ValueError: if mode is invalid, or both modify_terms and
            bcomments are defined
        """
        if not self.acl:
            # FIX: was ``raise "need acl defined"`` -- string exceptions are a
            # TypeError on Python >= 2.6, so raise a real exception instead.
            raise RuntimeError("need acl defined")
        argz = []
        argz.append('-a %s' % self.acl)
        if self.show_mods:
            argz.append('--show-mods')
        if self.no_worklog:
            argz.append('--no-worklog')
        if self.no_changes:
            argz.append('--no-changes')
        if not interactive:
            argz.append('--no-input')
        if self.mode == 'insert':
            argz.append('--insert-defined')
        elif self.mode == 'replace':
            argz.append('--replace-defined')
        else:
            # FIX: was a string exception (``raise "invalid mode"``).
            raise ValueError("invalid mode")
        for k,v in {'--source-address-from-file':self.source_ips,
                    '--destination-address-from-file':self.dest_ips,
                   }.items():
            if len(v) == 0:
                continue
            tmpf = tempfile.mktemp() + '_genacl'
            self.tempfiles.append(tmpf)
            try:
                f = open(tmpf,'w')
            except IOError:
                # FIX: was a bare except + string raise; narrow the catch and
                # re-raise the original error after reporting it.
                print("UNABLE TO OPEN TMPFILE")
                raise
            for x in v:
                f.write('%s\n' % x.strNormal())
            f.close()
            argz.append('%s %s' % (k,tmpf))
        for k,v in {'-p':self.source_ports,
                    '-P':self.dest_ports}.items():
            if not len(v):
                continue
            for x in v:
                argz.append('%s %d' % (k,x))
        if len(self.modify_terms) and len(self.bcomments):
            print("Can only define either modify_terms or between comments")
            # FIX: was a string exception.
            raise ValueError("Can only define either modify_terms or between comments")
        if self.modify_terms:
            for x in self.modify_terms:
                argz.append('-t %s' % x)
        else:
            for x in self.bcomments:
                (b,e) = x
                argz.append('-c "%s" "%s"' % (b,e))
        for proto in self.protocol:
            argz.append('--protocol %s' % proto)
        return argz

    def parselog(self, log):
        """Hook for post-processing a run() log; currently a pass-through."""
        return log

    def run(self, interactive=False):
        """Run the acl_script command.

        :param interactive: if True, run via os.system() attached to the
            terminal (no output captured); otherwise capture stdout
        :returns: list of captured, right-stripped output lines (empty when
            interactive)
        """
        args = self.genargs(interactive=interactive)
        log = []
        if interactive:
            os.system(self.cmd + ' ' + ' '.join(args))
        else:
            f = os.popen(self.cmd + ' ' + ' '.join(args))
            line = f.readline()
            while line:
                line = line.rstrip()
                log.append(line)
                line = f.readline()
        return log

    def errors_from_log(self, log):
        """Extract the text following '%%ERROR%%' markers from a run() log."""
        errors = ''
        for l in log:
            if '%%ERROR%%' in l:
                # BUG FIX: was ``l.spit(...)`` -- a typo that raised
                # AttributeError on every error line; use split().
                l = l.split('%%ERROR%%')[1]
                errors += l[1:] + '\n'
        return errors

    def diff_from_log(self, log):
        """Extract the text following '%%DIFF%%' markers from a run() log."""
        diff = ""
        for l in log:
            if '%%DIFF%%' in l:
                l = l.split('%%DIFF%%')[1]
                diff += l[1:] + '\n'
        return diff

    def set_acl(self, acl):
        """Set the target ACL name."""
        self.acl=acl

    def _add_addr(self, to, src):
        """Append IPy.IP address(es) parsed from src to *to*, de-duplicating."""
        if isinstance(src,list):
            for x in src:
                if IPy.IP(x) not in to:
                    to.append(IPy.IP(x))
        else:
            if IPy.IP(src) not in to:
                to.append(IPy.IP(src))

    def _add_port(self, to, src):
        """Append port number(s) to *to*, coercing to int and de-duplicating."""
        if isinstance(src, list):
            for x in src:
                # BUG FIX: compare the coerced int (was ``x not in to``, which
                # failed to de-duplicate when a string port was passed).
                if int(x) not in to:
                    to.append(int(x))
        else:
            if int(src) not in to:
                to.append(int(src))

    def add_protocol(self, src):
        """Add one protocol name or a list of them, de-duplicating."""
        to = self.protocol
        if isinstance(src, list):
            for x in src:
                if x not in to:
                    to.append(x)
        else:
            if src not in to:
                to.append(src)

    def add_src_host(self, data):
        """Add one or more source host addresses."""
        self._add_addr(self.source_ips, data)

    def add_dst_host(self, data):
        """Add one or more destination host addresses."""
        self._add_addr(self.dest_ips, data)

    def add_src_port(self, data):
        """Add one or more source ports."""
        self._add_port(self.source_ports, data)

    def add_dst_port(self, data):
        """Add one or more destination ports."""
        self._add_port(self.dest_ports, data)

    def add_modify_between_comments(self, begin, end):
        """Select terms between two comments; clears any modify_terms selection."""
        del self.modify_terms
        self.modify_terms = []
        self.bcomments.append((begin,end))

    def add_modify_term(self, term):
        """Select a term by name; clears any between-comments selection."""
        del self.bcomments
        self.bcomments = []
        if term not in self.modify_terms:
            self.modify_terms.append(term)

    def get_protocols(self):
        """Return the collected protocol list."""
        return self.protocol

    def get_src_hosts(self):
        """Return the collected source addresses."""
        return self.source_ips

    def get_dst_hosts(self):
        """Return the collected destination addresses."""
        return self.dest_ips

    def get_src_ports(self):
        """Return the collected source ports."""
        return self.source_ports

    def get_dst_ports(self):
        """Return the collected destination ports."""
        return self.dest_ports
| 2.15625 | 2 |