max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
FEM/Elements/E2D/Element2D.py | ZibraMax/FEM | 10 | 12761251 | <filename>FEM/Elements/E2D/Element2D.py
"""Defines a general 2D element
"""
from ..Element import Element
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.path as mpltPath
class Element2D(Element):
    """General 2D finite element: a closed polygon with border normals,
    shape-function plotting and point-membership testing.

    Args:
        coords (np.ndarray): Element coordinate matrix
        _coords (np.ndarray): Element coordinate matrix for graphical interface purposes
        gdl (np.ndarray): Degree of freedom matrix
    """

    def __init__(self, coords: np.ndarray, _coords: np.ndarray, gdl: np.ndarray) -> None:
        """Create a 2D element

        Args:
            coords (np.ndarray): Element coordinate matrix
            _coords (np.ndarray): Element coordinate matrix for graphical interface purposes
            gdl (np.ndarray): Degree of freedom matrix
        """
        Element.__init__(self, coords, _coords, gdl)
        # Close the polygon by appending the first vertex again, so that
        # consecutive pairs in _coordsg describe every border segment,
        # including the closing one.
        self._coordsg = np.array(
            self._coords.tolist()+[self._coords[0].tolist()])
        # Attach a unit normal (nx, ny) to every border segment.
        # The edge vector (dx, dy) is rotated to (dy, -dx) by negating the
        # first component and reversing, then normalised.
        # NOTE(review): whether this is the outward or inward normal depends
        # on the vertex winding order of _coords — confirm against callers.
        for i, e in enumerate(self.borders):
            delta = self._coordsg[i+1]-self._coordsg[i]
            delta[0] *= -1
            delta = delta[::-1]
            delta = delta/np.linalg.norm(delta)
            e.nx = delta[0]
            e.ny = delta[1]

    def draw(self) -> None:
        """Plot the element: one translucent 3D surface per shape function
        (psi_i over the element domain), plus the element outline and nodes
        drawn in the z=0 plane.
        """
        _z = self.domain
        # T maps natural (domain) coordinates to physical coordinates and
        # evaluates the shape functions — assumed from the Element base
        # class; TODO confirm signature.
        _x, _p = self.T(_z.T)
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        l = []
        l.append('Element')
        l.append('Nodes')
        for i in range(self.n):
            surf = ax.plot_trisurf(*_x.T, _p[:, i], alpha=0.3)
            # Workaround so legend() accepts 3D surfaces.
            # NOTE(review): these private attributes were renamed in newer
            # matplotlib versions (_facecolor3d) — verify installed version.
            surf._facecolors2d = surf._facecolors3d
            surf._edgecolors2d = surf._edgecolors3d
            l.append(r'$\psi_{'+format(i)+r'}$')
        # Closed outline of the element in the z=0 plane.
        __coords = np.array(self._coords.tolist()+[self._coords[0].tolist()]).T
        ax.plot(*__coords, [0]*len(__coords.T), '-', color='black')
        ax.plot(*self.coords.T, [0]*len(self.coords), 'o', color='blue')
        ax.legend(l)

    def jacobianGraph(self) -> None:
        """Plot the determinant of the Jacobian |J| over the element domain
        as a 3D surface with a colorbar, plus outline and nodes at z=0.
        """
        _z = self.domain
        _x, _p = self.T(_z.T)
        _j = self.J(_z.T)[0]
        __j = np.linalg.det(_j)
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        l = []
        surf = ax.plot_trisurf(*_x.T, __j, cmap='magma')
        # Same legend workaround as in draw() — see note there.
        surf._facecolors2d = surf._facecolors3d
        surf._edgecolors2d = surf._edgecolors3d
        l.append('Element')
        l.append('Nodes')
        l.append(r'$|J|$')
        cbar = fig.colorbar(surf)
        __coords = np.array(self._coords.tolist()+[self._coords[0].tolist()]).T
        ax.plot(*__coords, [0]*len(__coords.T), '-', color='black')
        ax.plot(*self.coords.T, [0]*len(self.coords), 'o', color='blue')
        ax.legend(l)

    def isInside(self, x: np.ndarray) -> np.ndarray:
        """Test if a given point is inside the element domain.

        Args:
            x (np.ndarray): Point to be tested

        Returns:
            np.ndarray: Boolean result of the test
        """
        path = mpltPath.Path(self._coords)
        inside2 = path.contains_points([x])
        return inside2[0]
| 3.46875 | 3 |
drift/loaders.py | monokrome/django-drift | 0 | 12761252 | <reponame>monokrome/django-drift
from django.conf import settings
import xlrd
import os
base_loader_error = 'The Loader class can only be used by extending it.'

# Map of loader ``type_name`` -> tuple of accepted file extensions.
# ``Loader.sniff`` compares these against ``os.path.splitext(path)[-1]``,
# which ALWAYS includes the leading dot, so every entry must include it.
extensions = getattr(
    settings,
    'DRIFT_LOADER_EXTENSIONS',
    {
        'excel': ('.xls', '.xlsx'),
        # BUG fix: was ('csv',) — without the dot, sniff() could never
        # match a CSV file against splitext()'s '.csv'.
        'csv': ('.csv',),
    }
)
class Loader(object):
    """ Detects and loads data from files. """

    # Key into the module-level ``extensions`` map; subclasses must set it.
    type_name = None

    def __init__(self, context, autoload=True):
        """Remember the file path from ``context`` and optionally open it.

        Args:
            context: object with a ``path`` attribute naming the file.
            autoload: when exactly True, call :meth:`open` immediately.
        """
        self.filename = context.path
        if autoload is True:
            # BUG fix: was ``return self.open()`` — __init__ must return
            # None, so a subclass whose open() returned a value would have
            # raised TypeError at construction time.
            self.open()

    def open(self):
        """Open the underlying file. Subclasses must override this."""
        raise NotImplementedError(base_loader_error)

    def close(self):
        """Release any resources held by the loader. Default is a no-op."""
        pass

    @classmethod
    def sniff(cls, context):
        """Return True when this loader recognises the file at context.path
        by its extension (per the module-level ``extensions`` map)."""
        if not cls.type_name:
            return False
        if cls.type_name not in extensions:
            return False
        return os.path.splitext(context.path)[-1] in extensions[cls.type_name]
class ExcelLoader(Loader):
    """ Detects and loads files stored in Excel formats. """

    supports_sheets = True
    type_name = 'excel'

    def open(self):
        """Open the workbook with xlrd and cache its sheet names/count."""
        workbook = xlrd.open_workbook(self.filename)
        names = workbook.sheet_names()
        self.backend = workbook
        self.sheet_names = names
        self.sheet_count = len(names)

    def close(self):
        """Free the memory held by the open workbook."""
        self.backend.release_resources()

    def sheet_by_name(self, name):
        """ Returns a sheet based on its name. """
        return self.backend.sheet_by_name(name)
# TODO: Finish Loader for importing from CSV data.
class CSVLoader(Loader):
    """ Detects and loads files stored in CSV format. """
    # CSV files contain a single flat table, so sheet-style access
    # (as in ExcelLoader) is not supported.
    supports_sheets = False
    type_name = 'csv'
| 2.265625 | 2 |
CleanFC2.py | Terryqqy/XVH2for1Dpolymer | 0 | 12761253 | import subprocess as sbp
import sys
import os
import numpy as np
import numpy.linalg as la
import pandas as pd
import time
import math
from ast import literal_eval
from pdb import set_trace as pst
'''
decfreq01: The original opitimization with negative freq as first one.
decfreq02: Move the atoms to direction of negative freq then got the normal positive freq.
decfreq03: Extract from the decfreq02's last optimized geometry.
All the indexs start with 0!
All carbon number should be even and should be odd's double !! why? otherwise need to check cellsnumber
'''
#=========================prefix setup part==============================#
#filesname_FC2fchk = 'decfreq02'
filesname_FC3fchk = 'C14H30Freq'
filesname_FC2fchk = 'C34H70Freq'
#filesname_FC2fchk = 'C34H70HFixed'
#filesname_com = 'decfreq03cart'
filesname_FC3com = 'C14H30Freq'
filesname_FC2com ='C34H70Freq'
filesname_FC3csv = 'FC3_C14AnaHess.csv'
#++++++++++++++constant setting++++++++++++++++++++++++++++++++++++++
# 1 amu expressed in electron masses (converts amu -> atomic units of mass).
meconstant = 1822.888486
# 1 Angstrom in Bohr radii.
Ang_bohr = 1.8897259886
au_cm = 4.359743E-18/(1.660538E-27 * 0.5292E-10 * 0.5292E-10/meconstant)#hatree/(amu*bohr*bohr) it transfer to SI unit
len_a = 2.567381*Ang_bohr #The average length of cell(transfer to bohr)
# Atomic masses in a.u.: index 0 = carbon, index 1 = hydrogen.
massau = [12.0107*meconstant,1.00794*meconstant] #From NIST database
#XXX the K is depended on how many the cells we used. here FC only take neighbor 1 cell so in total is 3 cells
#klist= np.linspace(0,1,K//2+1)#XXX here still use 0-1 but later should times pi/len_a when using
#FC4klist = np.linspace(0,2,K4+1)[:K4//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#XXX: plus cellsnumber and endidx
#........................... 2nd
#XXX this global variables could only be modified in local scope but not redefined.
# k-index -> (P x P) eigenvector matrix, filled by getCoef_w_perk.
FC2Coef_kp = {}
#............................3rd
# Atom element symbols read from the .com file by init_para().
FC2atomsname= []
#coordA = []
# NOTE(review): 'B3YLP' looks like a typo for 'B3LYP' — string kept as-is
# since it may be written verbatim into generated input files.
cal_method = '#p freq B3YLP/6-31G(d)'
#atomcharge = 0
#atommult = 1
'''
H13 H14
\/
C4-- C1 -- C2 -- C3
/\
H11 H12
'''
#===============================HARMONIC PART===============================#
def harmFreq_per_k():
    """Diagonalise the mass-weighted dynamical matrix at every sampled
    k point.

    Delegates to ``getCoef_w_perk`` for each entry of the module-level
    ``FC2klist``, which fills the global ``w_omgkp``/``w_omgkpcm`` arrays
    and ``FC2Coef_kp``; afterwards the k=0 frequencies are printed.
    Requires ``init_para()`` and ``cellsetting()`` to have run first.
    """
    for i in range(len(FC2klist)):
        # BUG fix: getCoef_w_perk takes (kidx, Fcc); the force-constant
        # array FC2 was previously not passed, raising TypeError on call.
        getCoef_w_perk(i, FC2)
    print("The w (omg) in a.u is :\n")
    # NOTE(review): w_omgkpcm holds frequencies converted to cm^-1
    # (see getCoef_w_perk), not a.u. as the message above suggests.
    print(w_omgkpcm[0])
#For now I just calculate the neaby cells
#Fuvk is 18*18 for atoms in first cell but Force constant was store in 96*96 but in lower dense triangular form.
#XXX: u and v is the uth vth Cartesian Coordinates!!!
def getCoef_w_perk(kidx,Fcc):
    """Build and diagonalise the mass-weighted dynamical matrix for the
    k point at index ``kidx`` of the global ``FC2klist``.

    Side effects: fills row ``kidx`` of the globals ``w_omgkp`` (angular
    frequencies, a.u.) and ``w_omgkpcm`` (frequencies, cm^-1), stores the
    conjugated eigenvector matrix in ``FC2Coef_kp[kidx]`` and prints the
    cm^-1 row.

    Args:
        kidx: index into FC2klist.
        Fcc: lower-triangular packed Cartesian force constants, indexed
            via getidx().

    Returns:
        (eigvector, Fuvk, eigval): eigenvectors, the dynamical matrix and
        its eigenvalues.
    """
    # NOTE(review): the '+ 0.1' shifts every sampled k — looks like a
    # leftover debugging offset; confirm it is intentional.
    kk = FC2klist[kidx] + 0.1
    Fuvk = np.zeros((P,P),dtype = np.complex_)
    #XXX: m is just -1 0 1 for decane
    for u in range(P):
        atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
        for v in range(P):
            eachterm = 0.0
            for midx in range(-FC2endidx,FC2endidx+1):
                # F u(0)v(m) :
                # Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
                # atom2 is the nth coordinates of each atoms XXX: which started with 0!
                atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
                # transfer to k space
                eachterm += Fcc[getidx(atom1,atom2)]* math.e ** (-1j * kk * midx*math.pi)#/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
                #eachterm += Fcc[atom1,atom2]* math.e ** (-1j * kk * midx * len_a)
            # mass weighted : if u and v is > 5 so it is not Carbon's coordinates
            Fuvk[u][v] = eachterm /(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
    eigval, eigvector = la.eigh(Fuvk)#hermition matrix to get real eigenvalue
    #print(eigval)
    for i in range(P):
        # abs() guards against tiny negative eigenvalues (imaginary modes).
        w_omgkp[kidx][i] = math.sqrt(abs(eigval[i]))
        w_omgkpcm[kidx][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
    print(w_omgkpcm[kidx])
    FC2Coef_kp[kidx] = eigvector.conjugate() #here we add v is a p*p matrix (p is branch number and number of atoms in cell
    return eigvector, Fuvk ,eigval
#df = pd.DataFrame(w_omgkpcm)
#df.to_csv('./w_omgkpcmNorm.csv')
def cleanFC2():
    """Mass-weight the FC2 array and diagonalise it at k index 0, printing
    the first four eigenvectors for inspection.

    NOTE(review): getCoef_w_perk divides by the mass factors again when it
    builds the dynamical matrix, so the matrix here is effectively
    mass-weighted twice — confirm that is intended. The projection /
    correction machinery below is left commented out.
    """
    #XXX My way
    #test = []
    #u = 1
    #atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
    #v = 3
    #for midx in range(-FC2endidx,FC2endidx+1):
    #    atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
    #    test.append(FC2[getidx(atom1,atom2)])
    #print(test)
    #print(w)
    #XXX Sode way
    #mass weighted first
    FCinput = FC2.copy()
    for u in range(P):
        atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
        for v in range(P):
            for midx in range(-FC2endidx,FC2endidx+1):
                atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
                FCinput[getidx(atom1,atom2)] = FC2[getidx(atom1,atom2)]/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
    L,D0,k = getCoef_w_perk(0,FCinput.copy())
    print(L[:,0])
    print(L[:,1])
    print(L[:,2])
    print(L[:,3])
    #I = np.eye(P)
    #L1 = np.outer(L[:,0],L[:,0])
    #L2 = np.outer(L[:,1],L[:,1])
    #L3 = np.outer(L[:,2],L[:,2])
    #L4 = np.outer(L[:,3],L[:,3])
    ##Pp = (I - L1@L1)@(I - L2@L2)@(I - L3@L3)@(I - L4@L4)
    #Pp = (I - L4@L4)
    #corrct = (Pp@D0@Pp - D0)/(15)
    ##print(corrct.shape)
    #FC2new = np.zeros(FC2.shape,dtype = np.complex_)
    #for u in range(P):
    #    atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
    #    for v in range(P):
    #        for midx in range(-FC2endidx,FC2endidx+1):
    #            atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
    #            FC2new[getidx(atom1,atom2)] = FCinput[getidx(atom1,atom2)] + corrct[u,v]
    #FCinput = FC2new.copy()
    #L,D0,k = getCoef_w_perk(1,FCinput.copy())
    #return Fnew
#XXX:Works really well! Check!
#def C14harmonicFreqCheck():
# eigvaltestOriginFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for kk in range(len(FC3klist)):
# Fuvk = np.zeros((P,P),dtype = np.complex_)
# #XXX: m is just -1 0 1 for decane
# #Carbon 1
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# eachterm = 0.0
# for midx in range(-FC3endidx,FC3endidx+1):
# # F u(0)v(m) :
# # Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
# # atom2 is the nth coordinates of each atoms XXX: which started with 0!
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# # transfer to k space
# eachterm += FC3FC2[getidx(atom1,atom2)]* math.e ** (-1j * klistFC3[kk] * midx * math.pi)
# # mass weighted : if u and v is > 5 so it is not Carbon's coordinates
# Fuvk[kappa][gamma] = eachterm /(math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigval, eigvector = la.eigh(Fuvk)#hermition matrix to get real eigenvalue
# for i in range(P):
# eigvaltestOriginFC3[kk][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestOriginFC3)
# eigvaltestFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for _p in range(P):
# for kidx in range(len(FC3klist)):
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# for midx in range(-FC3endidx,FC3endidx + 1):
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# eigvaltestFC3[kidx][_p] += FC3FC2[getidx(atom1,atom2)] * FC2Coef_kp[3*kidx][kappa][_p] * FC2Coef_kp[3*kidx][gamma][_p].conjugate()* math.e**(- 1j * midx * FC2klist[3* kidx] * math.pi) / (math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigvaltestFC3[kidx][_p] = math.sqrt(abs(eigvaltestFC3[kidx][_p] * au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestFC3)
#===============================ANHARM PART=============================#
#read in the csv file for force constant directly.
#TODO:Finish the code for polyethylene (already have FC)
#TODO:- readin FC - transfer FC to k space - diagrams - find root - last step.
"""
FC3 is stored in csv file need to read in
"""
#===============================HELPER FUNCTION========================#
"""
#helper function to readin the fchk FC2 and store in array and return copy
"""
def readFC2(filename):
    """Read the lower-triangular Cartesian force-constant block from
    ``<filename>.fchk`` in the current working directory.

    The .fchk header line "Cartesian Force Constants ... N= <count>" gives
    the number of values; the values follow, five per line.

    Returns:
        list[float]: the force constants, or None when no such file exists.
    """
    target = filename + '.fchk'
    for candidate in os.listdir('.'):
        if candidate != target:
            continue
        with open(candidate) as handle:
            lines = handle.readlines()
        # Locate the header line that announces the force-constant count.
        for header_idx, line in enumerate(lines):
            tokens = line.split()
            if tokens and tokens[0] == "Cartesian" and tokens[1] == "Force":
                count = int(tokens[5])
                break
        # Five values per row; the final row is short unless count is an
        # exact multiple of five.
        full_rows = int(count % 5 == 0)
        values = []
        for row in lines[header_idx + 1: header_idx + count // 5 + 2 - full_rows]:
            values.extend(float(token) for token in row.split())
        return values
"""
#get idx of FCs
"""
def getidx(*args):#XXX:started with 0!
    """Map 2, 3 or 4 (0-based) Cartesian-coordinate indices to a
    force-constant key.

    Two indices -> integer offset into the lower-triangular packed FC2
    array; three/four indices -> sorted, underscore-joined string key for
    the FC3/FC4 dictionaries. Index order does not matter (sorted first).

    Exits the program for any other argument count.
    """
    output = sorted(args)
    if len(output) == 2:
        # Lower-triangular packing: row*(row+1)/2 + col.
        # Integer '//' keeps the arithmetic exact for large indices
        # (the old '/2' round-tripped through float).
        return output[1] * (output[1] + 1) // 2 + output[0]
    if len(output) in (3, 4):
        return '_'.join(str(i) for i in output)
    sys.exit("wrong input for idx()")
"""
#cells setting
#return a numpy array of the index of the cell atoms
"""
def cellsetting():
    """Build the global ``FC2cellsnumber`` table mapping (cell, site) to
    1-based atom numbers in the force-constant matrix, and set the global
    ``FC2endidx`` (half the number of cells used, for the -m..+m loops).

    Requires ``init_para()`` to have filled ``FC2atomsname``. Assumes a
    CnH(2n+2) chain with an even carbon count; the 4 end carbons are cut
    off, leaving (n-4)/2 cells of 2 carbons + 4 hydrogens each.
    """
    ##totalnum = len(FC2atomsname)
    ##assert (totalnum-2)%3 == 0
    ###eg carbon_num is 10
    FC2carbon_num = int((len(FC2atomsname)-2)/3)
    ###eg numcellused is 3
    FC2numcellused = int((FC2carbon_num-4)/2)
    ##assert (carbon_num-4)%2 == 0
    global FC2cellsnumber
    FC2cellsnumber = np.zeros((FC2numcellused,6))#XXX:we use EVEN number of carbon here!!! and cut off the end 4 carbons
    FC2cellsnumber = FC2cellsnumber.astype(int)
    # Columns 0-1: carbon atom numbers; seeded for the first two cells and
    # the cell just past the midpoint, then filled by +/-4 steps below.
    FC2cellsnumber[:2,:2] = np.array([[1,2],[3,5]])
    FC2cellsnumber[FC2numcellused//2 + 1,:2] = np.array([FC2carbon_num - 4,FC2carbon_num - 6])
    for i in range(FC2numcellused):
        if i > 1 and i < FC2numcellused//2 + 1:
            FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] + 4
        elif i > FC2numcellused//2 + 1 :
            FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] - 4
        # Columns 2-5: the two hydrogens attached to each carbon, derived
        # from the carbon's atom number.
        for j in range(1,3):
            FC2cellsnumber[i,2*j] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +1
            FC2cellsnumber[i,2*j+1] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +2
    FC2cellused = len(FC2cellsnumber)#XXX should be odd
    global FC2endidx
    FC2endidx = FC2cellused//2# if cellused is 3 then endidx is 3//2 = 1 so the range is (-1, 2)
    print("For FC2 number of cells used is", FC2numcellused,"and the endidx is", FC2endidx)
    print(FC2cellsnumber)
'''
#get atoms name, charge, multi num, coordA(actually no use here)
'''
def init_para():
    """Initialise the module-level state: read atom names from the
    Gaussian .com file, load the FC2 force constants from the .fchk file
    and set the k-point list, branch count and frequency arrays.

    Globals written: FC2, K, K2, P, FC2klist, w_omgkp, w_omgkpcm
    (and FC2atomsname is appended to).
    """
    with open(filesname_FC2com + ".com") as f:
        read = f.readlines()
    # Find the title line (contains the word "calculation"), then skip
    # 3 lines (blank, charge/multiplicity) to reach the coordinates.
    for idx in range(len(read)):
        eachline = read[idx].split()
        if eachline and eachline[0] == "calculation":
            break
    idx += 3 #move from the title section to coordinates part
    while read[idx]!= '\n':
        eachline = read[idx].split()
        FC2atomsname.append(eachline[0])
        #for cdidx in range(1, len(eachline)):
        #coordA.append(float(eachline[cdidx]))
        idx+=1
    print("The number of FC2 atoms is",len(FC2atomsname))
    #readin the FC2 of the oject
    global FC2
    FC2 = np.array(readFC2(filesname_FC2fchk))
    global K
    K = 15
    global K2
    K2 = 15 # number of cells harmonic
    K3 = 5 # number of cells FC3
    K4 = 3 # number of cells FC4
    # N = atoms per unit cell (2 C + 4 H); P = 3N Cartesian coordinates.
    N = 6
    global P
    P = 3*N #branch number of normal modes in first BZ
    global FC2klist
    FC2klist = np.linspace(0,2,K2+1)[:K2//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
    #FC3klist = np.linspace(0,2,K3+1)[:K3//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
    #global FC3FC2
    #FC3FC2 = readFC2(filesname_FC3fchk)
    global w_omgkp
    global w_omgkpcm
    w_omgkp = np.zeros((len(FC2klist),P))#We just store the half BZ plus zero's w_omg since they are symmetric
    w_omgkpcm = np.zeros((len(FC2klist),P))
#===================================TEST PART ==============================#
# Script entry: load parameters, build the cell table, then diagonalise the
# dynamical matrix at k index 0 and print the first four eigenvectors.
t1 = time.time()
init_para()
cellsetting()
#cleanFC2()
L,D0,k = getCoef_w_perk(0,FC2)
#do mass-weighted back
#print(L.real)
for i in range(4):
    temp = L[i,:].real.copy()
    print(temp)
    #for a in range(len(temp)):
    #    temp[a] *= (math.sqrt(massau[int(a>5)]*massau[int(i>5)]))
#print("cellsnumber is ,",FC2cellsnumber)#,FC3cellsnumber)
# Elapsed wall-clock time for the whole run.
print(time.time()-t1)
#testpart(0)
| 2.09375 | 2 |
polyjit/experiments/sequences/hill_climber.py | PolyJIT/polyjit.experiments | 0 | 12761254 | #!/usr/bin/env python
"""This module supplies a function that can generate custom sequences of
optimization passes for arbitrary programs.
This module provides an implementation of a hill climber algorithm presented by
Kulkarni in his paper "Evaluating Heuristic Optimization Phase Order Search
Algorithms" (published 2007). The algorithm is used to generate a custom
optimization sequence for an arbitrary application. The resulting sequence
is a list of flags that can be set by the LLVM opt tool. The generated
sequence is meant to be a good flag combination that increases the amount of
code that can be detected by Polly.
"""
import random
import multiprocessing
import logging
import polyjit.experiments.sequences.polly_stats as polly_stats
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Default values
DEFAULT_PASS_SPACE = ['-basicaa', '-mem2reg']
DEFAULT_SEQ_LENGTH = 10
DEFAULT_ITERATIONS = 100
print_debug = False
def create_random_sequence(pass_space, seq_length):
    """Build a sequence of ``seq_length`` passes drawn uniformly at random
    (with replacement) from ``pass_space``.

    Args:
        pass_space (list[string]): the available optimization passes.
        seq_length (int): the length the sequence should have.

    Returns:
        list: the randomly generated pass sequence.
    """
    return [random.choice(pass_space) for _ in range(seq_length)]
def calculate_fitness_value(sequence, seq_to_fitness, key, program):
    """Compute and memoise the fitness value of ``sequence``.

    Fitness is the number of regions that are not valid SCoPs when the
    sequence is used for preoptimization before Polly's SCoP detection,
    so lower values are better. Keys already present in the cache are
    left untouched.

    Args:
        sequence (list[string]): the sequence to evaluate.
        seq_to_fitness (dict): cache of already-computed fitness values.
        key (string): dictionary key for this sequence.
        program (string): name of the application the sequence is
            evaluated against.
    """
    if key in seq_to_fitness:
        return
    seq_to_fitness[key] = polly_stats.get_regions_without_scops(
        sequence, program)
def calculate_neighbours(sequence, seq_to_fitness, pass_space, program):
    """Calculates the neighbours of the specified sequence.

    This method calculates all sequences that differ from the specified
    sequence at exactly one position. Furthermore this method calculates the
    fitness values of the neighbours (in parallel, via a process pool that
    fills ``seq_to_fitness`` as a side effect).

    A sequence is a neighbour of another sequence if they have exactly one
    different pass.
    E.g.: Sequences s1 = [a, a], s2 = [a, b], s3 = [b, b].
    s2 is a neighbour of s1, because they differ in the second position.
    s3 is not a neighbour of s1, because they differ in position one and
    two.

    Args:
        sequence (list[string]): the specified sequence.
        seq_to_fitness (dict): dictionary that contains calculated fitness
            values (must be a multiprocessing.Manager dict so worker
            processes can write to it).
        pass_space (list[string]): a list of all available passes.
        program (string): the name of the application the neighbour
            sequences should be used for.

    Returns:
        list[list[string]]: all neighbours of the specified sequence are
            returned as list.
    """
    neighbours = []
    pool = multiprocessing.Pool()
    # Also schedule the base sequence itself so its fitness is cached.
    # NOTE(review): apply_async results are never checked, so worker
    # exceptions are silently dropped.
    pool.apply_async(calculate_fitness_value,
                     args=(sequence, seq_to_fitness, str(sequence), program))
    for i in range(len(sequence)):
        remaining_passes = list(pass_space)
        remaining_passes.remove(sequence[i])
        # Create sequences with different pass at position i.
        for remaining_pass in remaining_passes:
            neighbour = list(sequence)
            neighbour[i] = remaining_pass
            pool.apply_async(calculate_fitness_value,
                             args=(neighbour, seq_to_fitness, str(neighbour),
                                   program))
            neighbours.append(neighbour)
    # Wait until every fitness value has been computed before returning.
    pool.close()
    pool.join()
    return neighbours
def climb(sequence, program, pass_space, seq_to_fitness):
    """Performs the actual hill climbing.

    Repeatedly replaces the base sequence with a better-performing
    neighbour (lower fitness = fewer non-SCoP regions) until no neighbour
    improves on it, i.e. a local optimum is reached.

    Args:
        sequence (list[string]): the sequence that should be used as base
            sequence.
        program (string): name of the application the sequences are applied
            on.
        pass_space (list[string]): a list containing all available passes.
        seq_to_fitness (dict): dictionary that stores calculated fitness
            values.

    Returns:
        list[string]: the locally optimal sequence.
    """
    log = logging.getLogger(__name__)
    base_sequence = sequence
    base_sequence_key = str(base_sequence)
    log.debug("Start climbing...")
    log.debug("Initial base sequence: %s", str(base_sequence))
    # Take the base sequence and calculate all neighbours. Check if the best
    # performing neighbour is better than the base sequence. If this is the
    # case this neighbour becomes the new base sequence.
    # This process is repeated until the base sequence outperforms all its
    # neighbours.
    climbs = 0
    changed = True
    while changed:
        changed = False
        # Calculate its neighbours.
        neighbours = calculate_neighbours(base_sequence, seq_to_fitness,
                                          pass_space, program)
        # Check if there is a better performing neighbour.
        # Note: the base is updated inside the loop, so later neighbours
        # are compared against the best one seen so far in this sweep.
        for neighbour in neighbours:
            if seq_to_fitness[base_sequence_key] \
                    > seq_to_fitness[str(neighbour)]:
                base_sequence = neighbour
                base_sequence_key = str(neighbour)
                changed = True
        climbs += 1
        log.debug("\n---> Climb number %s <---", str(climbs))
        log.debug("---> Base sequence: %s <---", str(base_sequence))
        log.debug("---> Neighbours: <---")
        if print_debug:
            for neighbour in neighbours:
                log.debug('Neighbour: %s; Fitness value: %s',
                          str(neighbour),
                          str(seq_to_fitness[str(neighbour)]))
    log.debug("Local optimum reached!\n")
    return base_sequence
def generate_custom_sequence(program, pass_space=DEFAULT_PASS_SPACE,
                             seq_length=DEFAULT_SEQ_LENGTH,
                             iterations=DEFAULT_ITERATIONS, debug=False):
    """Generates a custom optimization sequence for a provided application.

    Args:
        program (string): the name of the application a custom sequence should
            be generated for.
        pass_space (list[string], optional): list of passes that should be
            taken into consideration for the generation of the custom
            sequence.
        seq_length(int, optional): the length of the sequence that should be
            generated.
        iterations (int, optional): the number of times the hill climbing
            process is to be repeated.
        debug (boolean, optional): true if debug information should be printed;
            false otherwise.

    Returns:
        list[string]: the generated custom optimization sequence. Each element
            of the list represents one optimization pass.
    """
    global print_debug
    print_debug = debug
    log = logging.getLogger(__name__)
    best_sequence = []
    seq_to_fitness = multiprocessing.Manager().dict()
    log.debug("\n Start hill climbing algorithm...")
    for i in range(iterations):
        log.debug("Iteration: %d", i + 1)
        base_sequence = create_random_sequence(pass_space, seq_length)
        base_sequence = climb(base_sequence, program, pass_space,
                              seq_to_fitness)
        # Lower fitness (fewer non-SCoP regions) is better — see climb().
        # BUG fix: the comparison was inverted ('<'), which replaced the
        # best sequence whenever it was BETTER than the new climb result,
        # so the function returned the worst sequence found.
        if not best_sequence or seq_to_fitness[str(best_sequence)] > \
                seq_to_fitness[str(base_sequence)]:
            best_sequence = base_sequence
    # BUG fix: the '%d' placeholder had no argument, which makes the
    # logging machinery raise a formatting error when the record is emitted.
    log.debug("Best sequence found in %d iterations:", iterations)
    log.debug("Sequence: %s", best_sequence)
    log.debug("Fitness value: %s", str(seq_to_fitness[str(best_sequence)]))
    return best_sequence
| 3.3125 | 3 |
tests/test_gui_config.py | akki2825/CorpusTools | 97 | 12761255 | <filename>tests/test_gui_config.py
from corpustools.gui.config import *
def test_preferences(qtbot, settings):
    """Smoke test: the preferences dialog can be constructed with the given
    settings and accepted without raising (qtbot handles widget teardown)."""
    dialog = PreferencesDialog(None, settings)
    qtbot.addWidget(dialog)
    dialog.accept()
| 1.53125 | 2 |
Plugins/TemporalParallelismScriptGenerator/tp_export.py | aashish24/paraview-climate-3.11.1 | 1 | 12761256 | <reponame>aashish24/paraview-climate-3.11.1<gh_stars>1-10
# boolean telling if we want to export rendering.
export_rendering = %1
# string->string map with key being the proxyname while value being the
# file name on the system the generated python script is to be run on.
reader_input_map = { %2 };
# list of views along with a file name and magnification flag
screenshot_info = {%3}
# the number of processes working together on a single time step
timeCompartmentSize = %4
# the name of the Python script to be outputted
scriptFileName = "%5"
# this method replaces construction of proxies with methods
# that will work on the remote machine
def tp_hook(info, ctorMethod, ctorArgs, extraCtorCommands):
    """Trace hook that rewrites proxy construction for the generated
    temporal-parallelism script: readers become CreateReader(...) calls,
    views become CreateView(...) and temporal writers become
    CreateWriter(...); anything else is passed through unchanged.

    Returns the (possibly rewritten) (ctorMethod, ctorArgs,
    extraCtorCommands) triple expected by smtrace.
    """
    global reader_input_map, export_rendering
    if info.ProxyName in reader_input_map.keys():
        # mark this proxy as a reader input to make it easier to locate the
        # reader input for the writers.
        info.Proxy.tpReaderInput = reader_input_map[info.ProxyName]
        # take out the guiName argument if it exists
        newArgs = []
        import re
        for arg in ctorArgs:
            if re.match("^FileName", arg) == None and re.match("^guiName", arg) == None:
                newArgs.append(arg)
        # CreateReader(ctor, args, fileInfo) — see the generated template.
        newArgs = [ctorMethod, newArgs, "\"%s\"" % info.Proxy.tpReaderInput]
        ctorMethod = "CreateReader"
        extraCtorCommands = "timeSteps = GetActiveSource().TimestepValues if len(GetActiveSource().TimestepValues)!=0 else [0]"
        return (ctorMethod, newArgs, extraCtorCommands)
    proxy = info.Proxy
    # handle views
    if proxy.GetXMLGroup() == 'views' and export_rendering:
        proxyName = servermanager.ProxyManager().GetProxyName("views", proxy)
        # screenshot_info[name] = (file name, magnification, width, height).
        ctorArgs = [ ctorMethod, "\"%s\"" % screenshot_info[proxyName][0], \
            screenshot_info[proxyName][1], screenshot_info[proxyName][2], \
            screenshot_info[proxyName][3], "tp_views" ]
        return ("CreateView", ctorArgs, extraCtorCommands)
    # handle writers.
    if not proxy.GetHints() or \
        not proxy.GetHints().FindNestedElementByName("TemporalParallelism"):
        return (ctorMethod, ctorArgs, extraCtorCommands)
    # this is a writer we are dealing with.
    xmlElement = proxy.GetHints().FindNestedElementByName("TemporalParallelism")
    xmlgroup = xmlElement.GetAttribute("group")
    xmlname = xmlElement.GetAttribute("name")
    pxm = smtrace.servermanager.ProxyManager()
    writer_proxy = pxm.GetPrototypeProxy(xmlgroup, xmlname)
    ctorMethod = \
        smtrace.servermanager._make_name_valid(writer_proxy.GetXMLLabel())
    ctorArgs = [ctorMethod, \
        "\"%s\"" % proxy.GetProperty("FileName").GetElement(0), "tp_writers" ]
    ctorMethod = "CreateWriter"
    return (ctorMethod, ctorArgs, '')
try:
from paraview import smstate, smtrace
except:
raise RuntimeError('could not import paraview.smstate')
# Start trace
smtrace.start_trace(CaptureAllProperties=True, UseGuiName=True)
# update trace globals.
smtrace.trace_globals.proxy_ctor_hook = staticmethod(tp_hook)
smtrace.trace_globals.trace_output = []
# Get list of proxy lists
proxy_lists = smstate.get_proxy_lists_ordered_by_group(WithRendering=export_rendering)
# Now register the proxies with the smtrace module
for proxy_list in proxy_lists:
smstate.register_proxies_by_dependency(proxy_list)
# Calling append_trace causes the smtrace module to sort out all the
# registered proxies and their properties and write them as executable
# python.
smtrace.append_trace()
# Stop trace and print it to the console
smtrace.stop_trace()
output_contents = """
try: paraview.simple
except: from paraview.simple import *
import sys
import os
import paraview
paraview.servermanager.misc.GlobalMapperProperties.GlobalImmediateModeRendering = 1
# trying to import the library where I can specify the global and subcontrollers
try:
import libvtkParallelPython as vtkParallel # requires LD_LIBRARY_PATH being properly set
except ImportError:
import vtkParallelPython as vtkParallel # for a static build, i.e. jaguarpf, use this instead and don't worry about LD_LIBRARY_PATH
paraview.options.batch = True # this may not be necessary
paraview.simple._DisableFirstRenderCameraReset()
def CreateTimeCompartments(globalController, timeCompartmentSize):
if globalController.GetNumberOfProcesses() == 1:
print 'single process'
return
elif globalController.GetNumberOfProcesses() %% timeCompartmentSize != 0:
print 'number of processes must be an integer multiple of time compartment size'
return
elif timeCompartmentSize == globalController.GetNumberOfProcesses():
return globalController
gid = globalController.GetLocalProcessId()
timeCompartmentGroupId = int (gid / timeCompartmentSize )
newController = globalController.PartitionController(timeCompartmentGroupId, gid %% timeCompartmentSize)
# must unregister if the reference count is greater than 1
if newController.GetReferenceCount() > 1:
newController.UnRegister(None)
#print gid, timeCompartmentGroupId, gid %% timeCompartmentSize
print gid, ' of global comm is ', newController.GetLocalProcessId()
globalController.SetGlobalController(newController)
return newController
def CheckReader(reader):
if hasattr(reader, "FileName") == False:
print "ERROR: Don't know how to set file name for ", reader.SMProxy.GetXMLName()
sys.exit(-1)
if hasattr(reader, "TimestepValues") == False:
print "ERROR: ", reader.SMProxy.GetXMLName(), " doesn't have time information"
sys.exit(-1)
def CreateControllers(timeCompartmentSize):
pm = paraview.servermanager.vtkProcessModule.GetProcessModule()
globalController = pm.GetGlobalController()
if timeCompartmentSize > globalController.GetNumberOfProcesses():
timeCompartmentSize = globalController.GetNumberOfProcesses()
temporalController = CreateTimeCompartments(globalController, timeCompartmentSize)
return globalController, temporalController, timeCompartmentSize
currentTimeStep = -1
def UpdateCurrentTimeStep(globalController, timeCompartmentSize):
global currentTimeStep
if currentTimeStep == -1:
currentTimeStep = globalController.GetLocalProcessId() / timeCompartmentSize
return currentTimeStep
numTimeStepsPerIteration = globalController.GetNumberOfProcesses() / timeCompartmentSize
currentTimeStep = currentTimeStep + numTimeStepsPerIteration
return currentTimeStep
def WriteImages(currentTimeStep, currentTime, views):
for view in views:
filename = view.tpFileName.replace("%%t", str(currentTimeStep))
view.ViewTime = currentTime
WriteImage(filename, view, Magnification=view.tpMagnification)
def WriteFiles(currentTimeStep, currentTime, writers):
for writer in writers:
originalfilename = writer.FileName
fname = originalfilename.replace("%%t", str(currentTimeStep))
writer.FileName = fname
writer.UpdatePipeline(currentTime)
writer.FileName = originalfilename
def IterateOverTimeSteps(globalController, timeCompartmentSize, timeSteps, writers, views):
currentTimeStep = UpdateCurrentTimeStep(globalController, timeCompartmentSize)
while currentTimeStep < len(timeSteps):
print globalController.GetLocalProcessId(), " is working on ", currentTimeStep
WriteImages(currentTimeStep, timeSteps[currentTimeStep], views)
WriteFiles(currentTimeStep, timeSteps[currentTimeStep], writers)
currentTimeStep = UpdateCurrentTimeStep(globalController, timeCompartmentSize)
def CreateReader(ctor, args, fileInfo):
"Creates a reader, checks if it can be used, and sets the filenames"
reader = ctor()
CheckReader(reader)
import glob
files = glob.glob(fileInfo)
files.sort() # assume there is a logical ordering of the filenames that corresponds to time ordering
reader.FileName = files
for a in args:
s = "reader."+a
exec (s)
return reader
def CreateWriter(ctor, filename, tp_writers):
writer = ctor()
writer.FileName = filename
tp_writers.append(writer)
return writer
def CreateView(proxy_ctor, filename, magnification, width, height, tp_views):
view = proxy_ctor()
view.add_attribute("tpFileName", filename)
view.add_attribute("tpMagnification", magnification)
tp_views.append(view)
view.ViewSize = [width, height]
return view
tp_writers = []
tp_views = []
# ==================== end of specialized temporal parallelism sections ==================
timeCompartmentSize = %s
globalController, temporalController, timeCompartmentSize = CreateControllers(timeCompartmentSize)
%s
IterateOverTimeSteps(globalController, timeCompartmentSize, timeSteps, tp_writers, tp_views)
"""
pipeline_trace = ""
for original_line in smtrace.trace_globals.trace_output:
for line in original_line.split("\n"):
pipeline_trace += line + "\n";
outFile = open(scriptFileName, 'w')
outFile.write(output_contents % (timeCompartmentSize, pipeline_trace))
outFile.close()
| 2.46875 | 2 |
Class 12 Record Book/WithA.py | Bamgm14/My-Random-Work | 0 | 12761257 | def RemoveA(file1='Weird.txt',file2='WeirdButA.txt'):
f=open(file2,'a')
lst=open(file1,'r').readlines()
f1=open(file1,'w')
for x in lst:
if 'a' in x or 'A' in x:
f.write(x)
else:
f1.write(x)
f.close()
f1.close()
print(open(file1,'r').read(),open(file2,'r').read())
# Run with the default file pair (Weird.txt -> WeirdButA.txt).
RemoveA()
| 3.546875 | 4 |
data-api/EDR/provider/tca.py | ShaneMill1/NCPP_EDR_API | 0 | 12761258 | <filename>data-api/EDR/provider/tca.py<gh_stars>0
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import json
from EDR.provider.base import BaseProvider
from EDR.provider import tcaDecoder
from EDR.provider import tcaEncoder
import os
import random
import time
# API version advertised in the custom response header below.
VERSION = '0.0.1'
# Base response headers; 'Content-type' is filled in per request by the
# provider's query() method.
headers_ = {
    'X-Powered-By': 'Environmental Data Retrieval API {}'.format(VERSION)
}
class TcaProvider(BaseProvider):
    """EDR provider serving Tropical Cyclone Advisory (TCA) products.

    Raw TAC bulletins are loaded from the configured data directory at
    start-up; ``query`` matches them against the request identifier and
    returns the raw text, a JSON decoding, or an IWXXM/XML encoding.
    """

    def __init__(self, dataset, config):
        """Load all FK* TAC bulletin files for *dataset* into memory."""
        self.name = 'tca'
        self.config = config
        self.source_dir = os.path.join(os.path.dirname(__file__),
                                       config['datasets'][dataset]['provider']['data_source'])
        self.decoder = tcaDecoder.Decoder()
        self.encoder = tcaEncoder.Encoder()
        self.tacs = []
        # Only file names starting with 'FK' are TCA bulletins.
        for f in [os.path.join(self.source_dir, x)
                  for x in os.listdir(self.source_dir) if x[:2] == 'FK']:
            with open(f, 'r') as _fh:
                self.tacs.append(_fh.read())
        random.seed()

    def query(self, dataset, qtype, coords, time_range, z_value, params, identifier, outputFormat):
        """Find the bulletin matching *identifier* and render it as *outputFormat*."""
        tac = None
        for idx, t in enumerate(self.tacs):
            t_all = self.tacs[idx].split('\n')
            # Build the bulletin key from header line 2 and the day/time token
            # on line 7, then compare it against the request identifier.
            t_el = t_all[2].split(' ')
            dd = t_all[7].split(' ')[4]
            tac_string = t_el[0] + t_el[1] + '.' + t_el[2]
            id_search = identifier.split('_')
            id_manipulate = id_search[1][0:4]
            id_match = id_search[0] + '.' + dd + id_manipulate
            print(id_match + '==' + tac_string)
            if str(tac_string) in str(id_match):
                tac = self.tacs[idx]
                break
        if tac is None:
            # BUGFIX: previously `tac` stayed unbound here and the code below
            # raised UnboundLocalError; report a missing product instead.
            headers_['Content-type'] = 'text/ascii'
            return 'No product found'
        if outputFormat == 'ascii':
            headers_['Content-type'] = 'text/ascii'
            return tac, 'no_delete'
        decodedTAC = self.decoder(tac)
        if outputFormat == 'json':
            headers_['Content-type'] = 'application/json'
            return json.dumps(decodedTAC), 'no_delete'
        else:
            headers_['Content-type'] = 'application/xml'
            decodedTAC['translatedBulletinReceptionTime'] = time.strftime('%Y-%m-%dT%H:%M:%SZ')
            decodedTAC['translatedBulletinID'] = 'FKXX23KNHC%s' % time.strftime('%d%H%M')
            return self.encoder(decodedTAC, tac), 'no_delete'
| 1.945313 | 2 |
intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/crypto/plugins/module_utils/crypto/module_backends/privatekey.py | Stienvdh/statrick | 0 | 12761259 | <filename>intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/community/crypto/plugins/module_utils/crypto/module_backends/privatekey.py
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2016, <NAME> <<EMAIL>>
# Copyright: (c) 2020, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import abc
import base64
import traceback
from distutils.version import LooseVersion
from ansible.module_utils import six
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils._text import to_bytes
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
load_privatekey,
get_fingerprint_of_privatekey,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pem import (
identify_private_key_format,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.module_backends.common import ArgumentSpec
# Minimum library versions this module can work with.
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
# Optional dependency: pyOpenSSL. The import error is recorded so it can be
# reported via fail_json later instead of crashing at import time.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Optional dependency: cryptography. Hazmat submodules must be imported
# explicitly to be reachable via attribute access below.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class PrivateKeyError(OpenSSLObjectError):
    """Raised when generating, loading or serializing a private key fails."""
    pass
# From the object called `module`, only the following properties are used:
#
# - module.params[]
# - module.warn(msg: str)
# - module.fail_json(msg: str, **kwargs)
@six.add_metaclass(abc.ABCMeta)
class PrivateKeyBackend:
    """Abstract base shared by the pyOpenSSL and cryptography backends.

    Stores the module options, tracks the existing key (raw bytes and
    parsed object) and any newly generated key, and implements the shared
    regeneration/conversion decision logic and result serialization.
    """
    def __init__(self, module, backend):
        self.module = module
        self.type = module.params['type']
        self.size = module.params['size']
        self.curve = module.params['curve']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.format = module.params['format']
        # Optional options: default when the calling module does not declare them.
        self.format_mismatch = module.params.get('format_mismatch', 'regenerate')
        self.regenerate = module.params.get('regenerate', 'full_idempotence')
        self.backend = backend
        # Newly generated/converted key, parsed existing key, and its raw bytes.
        self.private_key = None
        self.existing_private_key = None
        self.existing_private_key_bytes = None
    @abc.abstractmethod
    def generate_private_key(self):
        """(Re-)Generate private key."""
        pass
    def convert_private_key(self):
        """Convert existing private key (self.existing_private_key) to new private key (self.private_key).
        This is effectively a copy without active conversion. The conversion is done
        during load and store; get_private_key_data() uses the destination format to
        serialize the key.
        """
        self._ensure_existing_private_key_loaded()
        self.private_key = self.existing_private_key
    @abc.abstractmethod
    def get_private_key_data(self):
        """Return bytes for self.private_key."""
        pass
    def set_existing(self, privatekey_bytes):
        """Set existing private key bytes. None indicates that the key does not exist."""
        self.existing_private_key_bytes = privatekey_bytes
    def has_existing(self):
        """Query whether an existing private key is/has been there."""
        return self.existing_private_key_bytes is not None
    @abc.abstractmethod
    def _check_passphrase(self):
        """Check whether provided passphrase matches, assuming self.existing_private_key_bytes has been populated."""
        pass
    @abc.abstractmethod
    def _ensure_existing_private_key_loaded(self):
        """Make sure that self.existing_private_key is populated from self.existing_private_key_bytes."""
        pass
    @abc.abstractmethod
    def _check_size_and_type(self):
        """Check whether provided size and type matches, assuming self.existing_private_key has been populated."""
        pass
    @abc.abstractmethod
    def _check_format(self):
        """Check whether the key file format, assuming self.existing_private_key and self.existing_private_key_bytes has been populated."""
        pass
    def needs_regeneration(self):
        """Check whether a regeneration is necessary."""
        if self.regenerate == 'always':
            return True
        if not self.has_existing():
            # key does not exist
            return True
        if not self._check_passphrase():
            # Unreadable key: only 'full_idempotence' may silently regenerate it.
            if self.regenerate == 'full_idempotence':
                return True
            self.module.fail_json(msg='Unable to read the key. The key is protected with a another passphrase / no passphrase or broken.'
                                      ' Will not proceed. To force regeneration, call the module with `generate`'
                                      ' set to `full_idempotence` or `always`, or with `force=yes`.')
        self._ensure_existing_private_key_loaded()
        if self.regenerate != 'never':
            if not self._check_size_and_type():
                if self.regenerate in ('partial_idempotence', 'full_idempotence'):
                    return True
                self.module.fail_json(msg='Key has wrong type and/or size.'
                                          ' Will not proceed. To force regeneration, call the module with `generate`'
                                          ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
        # During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
        if self.format_mismatch == 'regenerate' and self.regenerate != 'never':
            if not self._check_format():
                if self.regenerate in ('partial_idempotence', 'full_idempotence'):
                    return True
                self.module.fail_json(msg='Key has wrong format.'
                                          ' Will not proceed. To force regeneration, call the module with `generate`'
                                          ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
                                          ' To convert the key, set `format_mismatch` to `convert`.')
        return False
    def needs_conversion(self):
        """Check whether a conversion is necessary. Must only be called if needs_regeneration() returned False."""
        # During conversion step, convert if format does not match and format_mismatch == 'convert'
        self._ensure_existing_private_key_loaded()
        return self.has_existing() and self.format_mismatch == 'convert' and not self._check_format()
    def _get_fingerprint(self):
        # Prefer the freshly generated key; fall back to the existing key.
        if self.private_key:
            return get_fingerprint_of_privatekey(self.private_key, backend=self.backend)
        try:
            self._ensure_existing_private_key_loaded()
        except Exception as dummy:
            # Ignore errors
            pass
        if self.existing_private_key:
            return get_fingerprint_of_privatekey(self.existing_private_key, backend=self.backend)
    def dump(self, include_key):
        """Serialize the object into a dictionary."""
        if not self.private_key:
            try:
                self._ensure_existing_private_key_loaded()
            except Exception as dummy:
                # Ignore errors
                pass
        result = {
            'type': self.type,
            'size': self.size,
            'fingerprint': self._get_fingerprint(),
        }
        if self.type == 'ECC':
            result['curve'] = self.curve
        if include_key:
            # Get hold of private key bytes
            pk_bytes = self.existing_private_key_bytes
            if self.private_key is not None:
                pk_bytes = self.get_private_key_data()
            # Store result
            if pk_bytes:
                # Raw keys are binary and not printable, so they are returned
                # base64-encoded; PEM keys are returned as text.
                if identify_private_key_format(pk_bytes) == 'raw':
                    result['privatekey'] = base64.b64encode(pk_bytes)
                else:
                    result['privatekey'] = pk_bytes.decode('utf-8')
            else:
                result['privatekey'] = None
        return result
# Implementation with using pyOpenSSL
class PrivateKeyPyOpenSSLBackend(PrivateKeyBackend):
    """Private key backend implemented on top of pyOpenSSL.

    Only supports RSA and DSA keys and the 'auto_ignore' format option.
    """
    def __init__(self, module):
        super(PrivateKeyPyOpenSSLBackend, self).__init__(module=module, backend='pyopenssl')
        if self.type == 'RSA':
            self.openssl_type = crypto.TYPE_RSA
        elif self.type == 'DSA':
            self.openssl_type = crypto.TYPE_DSA
        else:
            self.module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
        if self.format != 'auto_ignore':
            self.module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")
    def generate_private_key(self):
        """(Re-)Generate private key."""
        self.private_key = crypto.PKey()
        try:
            self.private_key.generate_key(self.openssl_type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)
    def _ensure_existing_private_key_loaded(self):
        # Lazily parse the existing key bytes; wrong passphrases surface as
        # PrivateKeyError.
        if self.existing_private_key is None and self.has_existing():
            try:
                self.existing_private_key = load_privatekey(
                    None, self.passphrase, content=self.existing_private_key_bytes, backend=self.backend)
            except OpenSSLBadPassphraseError as exc:
                raise PrivateKeyError(exc)
    def get_private_key_data(self):
        """Return bytes for self.private_key"""
        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.private_key,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.private_key)
    def _check_passphrase(self):
        # A failed load for any reason is treated as "passphrase does not match".
        try:
            load_privatekey(None, self.passphrase, content=self.existing_private_key_bytes, backend=self.backend)
            return True
        except Exception as dummy:
            return False
    def _check_size_and_type(self):
        return self.size == self.existing_private_key.bits() and self.openssl_type == self.existing_private_key.type()
    def _check_format(self):
        # Not supported by this backend
        return True
# Implementation with using cryptography
class PrivateKeyCryptographyBackend(PrivateKeyBackend):
    """Private key backend implemented on top of the cryptography library.

    Supports RSA, DSA, ECC and — depending on the installed cryptography
    version — X25519, X448, Ed25519 and Ed448 keys, plus format detection
    and conversion between PKCS#1, PKCS#8 and raw encodings.
    """
    def _get_ec_class(self, ectype):
        # Resolve the curve class by name so that curves missing from the
        # installed cryptography version fail with a clear message.
        ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
        if ecclass is None:
            self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
        return ecclass
    def _add_curve(self, name, ectype, deprecated=False):
        # Register constructor/verifier closures for one named curve.
        def create(size):
            ecclass = self._get_ec_class(ectype)
            return ecclass()
        def verify(privatekey):
            ecclass = self._get_ec_class(ectype)
            return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
        self.curves[name] = {
            'create': create,
            'verify': verify,
            'deprecated': deprecated,
        }
    def __init__(self, module):
        super(PrivateKeyCryptographyBackend, self).__init__(module=module, backend='cryptography')
        self.curves = dict()
        self._add_curve('secp224r1', 'SECP224R1')
        self._add_curve('secp256k1', 'SECP256K1')
        self._add_curve('secp256r1', 'SECP256R1')
        self._add_curve('secp384r1', 'SECP384R1')
        self._add_curve('secp521r1', 'SECP521R1')
        self._add_curve('secp192r1', 'SECP192R1', deprecated=True)
        self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
        self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
        self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
        self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
        self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
        self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
        self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
        self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
        self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
        self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
        self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
        self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
        self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()
        # Fail early if the requested modern key type is not available in the
        # installed cryptography version.
        if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519')
        if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
        if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
            self.module.fail_json(msg='Your cryptography version does not support X448')
        if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
            self.module.fail_json(msg='Your cryptography version does not support Ed25519')
        if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
            self.module.fail_json(msg='Your cryptography version does not support Ed448')
    def _get_wanted_format(self):
        # 'auto'/'auto_ignore' map to the conventional format for the key type.
        if self.format not in ('auto', 'auto_ignore'):
            return self.format
        if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
            return 'pkcs8'
        else:
            return 'pkcs1'
    def generate_private_key(self):
        """(Re-)Generate private key."""
        try:
            if self.type == 'RSA':
                self.private_key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
                    public_exponent=65537,  # OpenSSL always uses this
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if self.type == 'DSA':
                self.private_key = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
                self.private_key = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
                self.private_key = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
                self.private_key = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
                self.private_key = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
            if self.type == 'ECC' and self.curve in self.curves:
                if self.curves[self.curve]['deprecated']:
                    self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
                self.private_key = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
                    curve=self.curves[self.curve]['create'](self.size),
                    backend=self.cryptography_backend
                )
        except cryptography.exceptions.UnsupportedAlgorithm as dummy:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
    def get_private_key_data(self):
        """Return bytes for self.private_key"""
        # Select export format and encoding
        try:
            export_format = self._get_wanted_format()
            export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
            if export_format == 'pkcs1':
                # "TraditionalOpenSSL" format is PKCS1
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
            elif export_format == 'pkcs8':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            elif export_format == 'raw':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
                export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
        except AttributeError:
            # Older cryptography versions may lack PrivateFormat.Raw / Encoding.Raw.
            self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
        # Select key encryption
        encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
        if self.cipher and self.passphrase:
            if self.cipher == 'auto':
                encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
            else:
                self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
        # Serialize key
        try:
            return self.private_key.private_bytes(
                encoding=export_encoding,
                format=export_format,
                encryption_algorithm=encryption_algorithm
            )
        except ValueError as dummy:
            self.module.fail_json(
                msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
            )
        except Exception as dummy:
            self.module.fail_json(
                msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
                exception=traceback.format_exc()
            )
    def _load_privatekey(self):
        data = self.existing_private_key_bytes
        try:
            # Interpret bytes depending on format.
            format = identify_private_key_format(data)
            if format == 'raw':
                # Raw keys carry no type information; disambiguate by length
                # (X448: 56 bytes, Ed448: 57 bytes) and, for 32-byte keys, by
                # the requested key type (X25519 vs Ed25519 are ambiguous).
                if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
                    return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
                if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
                    return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
                if len(data) == 32:
                    if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
                        return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
                        return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
                        try:
                            return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                        except Exception:
                            return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                raise PrivateKeyError('Cannot load raw key')
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)
    def _ensure_existing_private_key_loaded(self):
        if self.existing_private_key is None and self.has_existing():
            self.existing_private_key = self._load_privatekey()
    def _check_passphrase(self):
        # NOTE(review): the PEM branch returns the loaded key object (truthy)
        # rather than True — callers only test truthiness, so this is fine.
        try:
            format = identify_private_key_format(self.existing_private_key_bytes)
            if format == 'raw':
                # Raw keys cannot be encrypted. To avoid incompatibilities, we try to
                # actually load the key (and return False when this fails).
                self._load_privatekey()
                # Loading the key succeeded. Only return True when no passphrase was
                # provided.
                return self.passphrase is None
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    self.existing_private_key_bytes,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as dummy:
            return False
    def _check_size_and_type(self):
        # Match the parsed key's class (and, where applicable, bit size /
        # curve) against the requested type/size/curve options.
        if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == self.existing_private_key.key_size
        if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == self.existing_private_key.key_size
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(self.existing_private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            return self.curves[self.curve]['verify'](self.existing_private_key)
        return False
    def _check_format(self):
        if self.format == 'auto_ignore':
            return True
        try:
            format = identify_private_key_format(self.existing_private_key_bytes)
            return format == self._get_wanted_format()
        except Exception as dummy:
            return False
def select_backend(module, backend):
    """Resolve *backend* ('auto', 'pyopenssl' or 'cryptography') and build it.

    Returns a tuple ``(backend_name, backend_instance)``. With 'auto', the
    preferred backend depends on whether an explicit (non-'auto') cipher with
    a passphrase was requested: pyOpenSSL is then tried first, otherwise
    cryptography is preferred. Fails the module when no usable library with a
    sufficient version is installed.
    """
    if backend == 'auto':
        # Which libraries are installed in a usable version?
        usable = []
        if CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION):
            usable.append('cryptography')
        if PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION):
            usable.append('pyopenssl')
        # An explicit cipher (anything but 'auto') plus passphrase needs
        # pyOpenSSL; otherwise cryptography is the preferred backend.
        wants_explicit_cipher = bool(
            module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto')
        preference = ['pyopenssl', 'cryptography'] if wants_explicit_cipher else ['cryptography', 'pyopenssl']
        for candidate in preference:
            if candidate in usable:
                backend = candidate
                break
        # Still 'auto' means nothing usable was found.
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    if backend == 'pyopenssl':
        if not PYOPENSSL_FOUND:
            module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                             exception=PYOPENSSL_IMP_ERR)
        module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                         version='2.0.0', collection_name='community.crypto')
        return backend, PrivateKeyPyOpenSSLBackend(module)
    elif backend == 'cryptography':
        if not CRYPTOGRAPHY_FOUND:
            module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                             exception=CRYPTOGRAPHY_IMP_ERR)
        return backend, PrivateKeyCryptographyBackend(module)
    else:
        raise Exception('Unsupported value for backend: {0}'.format(backend))
def get_privatekey_argument_spec():
    """Return the ArgumentSpec shared by the privatekey modules."""
    return ArgumentSpec(
        argument_spec=dict(
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp224r1', 'secp256k1', 'secp256r1', 'secp384r1', 'secp521r1',
                'secp192r1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect163k1', 'sect163r2', 'sect233k1', 'sect233r1', 'sect283k1',
                'sect283r1', 'sect409k1', 'sect409r1', 'sect571k1', 'sect571r1',
            ]),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
            format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
            regenerate=dict(
                type='str',
                default='full_idempotence',
                choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
            ),
        ),
        # A cipher only makes sense together with a passphrase (and vice versa).
        required_together=[
            ['cipher', 'passphrase']
        ],
        # ECC keys require an explicit curve selection.
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )
| 1.8125 | 2 |
scrapers/tests/test_dcwd_spider.py | ralphqq/dc-alerts-service | 0 | 12761260 | from unittest import skip
from scrapy.http import Request
from scrapers.scrapers.spiders.dcwd import DcwdSpider
from scrapers.tests.base_scraper_test_setup import ScraperTestCase, html_files
from scrapers.tests.utils import make_response_object, make_fake_id
class DcwdParserTests(ScraperTestCase):
    """Unit tests for DcwdSpider's parse callbacks, driven by saved HTML fixtures."""
    def setUp(self):
        # limit=1 keeps the spider from following more than one listing page.
        self.spider = DcwdSpider(limit=1)
    def test_parse(self):
        """Tests the spider's main parse method."""
        valid_results = self.get_parse_results(
            response=make_response_object(html_files['dcwd_index'])
        )
        # Test if list is non-empty
        self.assertGreater(len(valid_results), 0)
        # Test each Request object in the result
        for request in valid_results:
            self.assertIsInstance(request, Request)
            self.assertIsNotNone(request.meta)
            self.assertIsNotNone(request.meta.get('urgency'))
            self.assertIsNotNone(request.meta.get('title'))
            self.assertIsNotNone(request.meta.get('notice_id'))
    def test_parse_page(self):
        """Tests the spider's parse_page method."""
        valid_results = self.get_parse_results(
            parse_method_name='parse_page',
            response=make_response_object(
                filepath=html_files['dcwd_details'],
                meta={'urgency': 'a', 'title': 'Some Title',
                      'notice_id': make_fake_id()}
            )
        )
        self.assertGreater(len(valid_results), 0)
        # Every scraped item must carry the full notice payload.
        for item in valid_results:
            self.assertIsNotNone(item.get('urgency'))
            self.assertIsNotNone(item.get('headline'))
            self.assertIsNotNone(item.get('source_url'))
            self.assertIsNotNone(item.get('notice_id'))
            self.assertIsNotNone(item.get('posted_on'))
            self.assertIsInstance(item.get('details'), list)
            self.assertIsNotNone(item.get('scraped_on'))
            self.assertEqual(item.get('provider'), 'DCWD')
            self.assertEqual(item.get('service'), 'Water')
    def test_water_outage_details(self):
        """Tests if scraped outage details are complete."""
        # Get results from parse_page
        valid_results = self.get_parse_results(
            parse_method_name='parse_page',
            response=make_response_object(
                filepath=html_files['dcwd_details'],
                meta={'urgency': 'a', 'title': 'Some Title',
                      'notice_id': make_fake_id()}
            )
        )
        for item in valid_results:
            # Unpack list of outage details (dicts)
            details_per_set = item['details']
            for outage_set in details_per_set:
                self.assertIsNotNone(outage_set.get('set_n'))
                self.assertIsNotNone(outage_set.get('when'))
                self.assertIsNotNone(outage_set.get('where'))
                self.assertIsNotNone(outage_set.get('why'))
| 2.796875 | 3 |
joining_server/server_join_handler.py | CSU-Hacks/ClearBot | 1 | 12761261 | <reponame>CSU-Hacks/ClearBot
#Handles the process of the bot being joined to a new server
import discord, psycopg2, time #external libraries
import joining_server.server_join_postgres as server_join_postgres #internal library
async def new_server_handler(server):
    """Register a freshly-joined guild and its current members in the database."""
    # Collect the ids of everyone currently in the guild.
    member_ids = [member.id for member in server.members]
    # Persist the guild itself first, then attach its member list.
    await server_join_postgres.new_server_added(server.id, str(server.name), server.owner_id)
    await server_join_postgres.new_server_add_users(member_ids, server.id)
    return
| 2.390625 | 2 |
Mundos/Mundo 1/Aulas/Aula 10/ex033.py | NicolasdeLimaAlves/Curso-de-Python | 0 | 12761262 | <reponame>NicolasdeLimaAlves/Curso-de-Python
# Read three integers and report which one is the strict maximum and which is
# the strict minimum. With strict comparisons, nothing is printed on ties —
# this preserves the exercise's original behaviour.
pn = int(input('Digite o primeiro número: '))
sn = int(input('Digite o segundo número: '))
tn = int(input('Digite o terceiro número: '))
# The original chained a no-op .format() onto each message (the strings have
# no '{}' placeholder); those dead calls are removed here — output is unchanged.
if pn > sn and pn > tn:
    print('O primeiro número é o maior.')
if pn < sn and pn < tn:
    print('O primeiro número é o menor')
if sn > pn and sn > tn:
    print('O segundo número é o maior')
if sn < pn and sn < tn:
    print('O segundo número é o menor')
if tn > pn and tn > sn:
    print('O terceiro número é o maior')
if tn < pn and tn < sn:
    print('O terceiro número é o menor')
| 4.125 | 4 |
comments/views.py | awesome-archive/DjangoBlog | 0 | 12761263 | from django.shortcuts import render
# Create your views here.
from .models import Comment
from blog.models import Article
from .forms import CommentForm
from django.views.generic.edit import FormView
from django.http import HttpResponseRedirect
from django.contrib.auth import get_user_model
from django import forms
class CommentPostView(FormView):
    """Handles comment submission for an article.

    GET requests bounce back to the article's comment anchor; POSTs are
    validated through CommentForm and, on success, the comment is saved and
    the related caches are invalidated before redirecting to the new comment.
    """
    form_class = CommentForm
    template_name = 'blog/article_detail.html'

    def get(self, request, *args, **kwargs):
        # Comments have no standalone page; redirect to the article's anchor.
        article_id = self.kwargs['article_id']
        article = Article.objects.get(pk=article_id)
        url = article.get_absolute_url()
        return HttpResponseRedirect(url + "#comments")

    def form_invalid(self, form):
        """Re-render the article detail page with the invalid form."""
        article_id = self.kwargs['article_id']
        article = Article.objects.get(pk=article_id)
        if self.request.user.is_authenticated:
            # Logged-in users never type name/email: hide both fields and
            # pre-fill them from the account so re-validation can succeed.
            form.fields.update({
                'email': forms.CharField(widget=forms.HiddenInput()),
                'name': forms.CharField(widget=forms.HiddenInput()),
            })
            user = self.request.user
            form.fields["email"].initial = user.email
            form.fields["name"].initial = user.username
        return self.render_to_response({
            'form': form,
            'article': article
        })

    def form_valid(self, form):
        """Persist the comment once the submitted data has validated."""
        user = self.request.user
        article_id = self.kwargs['article_id']
        article = Article.objects.get(pk=article_id)
        # BUGFIX: is_authenticated is a property (used as such in form_invalid
        # above); calling it raises TypeError on Django >= 1.10.
        if not self.request.user.is_authenticated:
            email = form.cleaned_data['email']
            username = form.cleaned_data['name']
            # Anonymous commenters get (or reuse) a lightweight user record.
            user = get_user_model().objects.get_or_create(username=username, email=email)[0]
            # auth.login(self.request, user)
        comment = form.save(False)
        comment.article = article
        comment.author = user
        if form.cleaned_data['parent_comment_id']:
            parent_comment = Comment.objects.get(pk=form.cleaned_data['parent_comment_id'])
            comment.parent_comment = parent_comment
        comment.save(True)
        from DjangoBlog.utils import expire_view_cache, cache
        from django.contrib.sites.models import Site
        path = article.get_absolute_url()
        site = Site.objects.get_current().domain
        # Strip an explicit port from the configured domain, if present.
        if site.find(':') > 0:
            site = site[0:site.find(':')]
        port = 80
        try:
            # request.get_port() does not exist on Django 1.8; fall back to 80.
            port = self.request.get_port()
        except Exception:
            pass
        # Invalidate every cache entry that may hold the stale comment list.
        expire_view_cache(path, servername=site, serverport=port, key_prefix='blogdetail')
        if cache.get('seo_processor'):
            cache.delete('seo_processor')
        comment_cache_key = 'article_comments_{id}'.format(id=article_id)
        cache.delete(comment_cache_key)
        from django.core.cache.utils import make_template_fragment_key
        username = self.request.user.username if self.request.user else ''
        key = make_template_fragment_key('sidebar', [username])
        cache.delete(key)
        return HttpResponseRedirect("%s#div-comment-%d" % (article.get_absolute_url(), comment.pk))
| 2.296875 | 2 |
sim/resource.py | edgerun/faas-sim | 19 | 12761264 | <gh_stars>10-100
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import numpy as np
from sim.core import Environment
from sim.faas import FunctionReplica, FaasSystem, FunctionState
class ResourceUtilization:
    """Tracks accumulated resource usage (e.g. 'cpu', 'memory') by name."""

    # name -> accumulated value; totals can go negative via remove_resource
    __resources: Dict[str, float]

    def __init__(self):
        self.__resources = {}

    def put_resource(self, resource: str, value: float):
        """Add *value* to the running total for *resource* (starting at 0)."""
        # dict.get with a default replaces the original two-step None check.
        self.__resources[resource] = self.__resources.get(resource, 0) + value

    def remove_resource(self, resource: str, value: float):
        """Subtract *value* from the running total for *resource* (starting at 0)."""
        self.__resources[resource] = self.__resources.get(resource, 0) - value

    def list_resources(self) -> Dict[str, float]:
        """Return a deep copy of the resource table (safe for callers to mutate)."""
        return deepcopy(self.__resources)

    def copy(self) -> 'ResourceUtilization':
        """Return an independent copy of this utilization object."""
        util = ResourceUtilization()
        util.__resources = self.list_resources()
        return util

    def get_resource(self, resource: str) -> Optional[float]:
        """Return the total for *resource*, or None if it was never recorded."""
        return self.__resources.get(resource)

    def is_empty(self) -> bool:
        """True if no resource has ever been recorded."""
        return not self.__resources
class NodeResourceUtilization:
    """Per-node view of resource usage, bucketed by pod name.

    Keying by pod name keeps entries unique even when the same
    FunctionContainer runs multiple times on one node; a side table maps
    each pod name back to its FunctionReplica.
    """
    __resources: Dict[str, ResourceUtilization]
    __replicas: Dict[str, FunctionReplica]

    def __init__(self):
        self.__resources = {}
        self.__replicas = {}

    def put_resource(self, replica: FunctionReplica, resource: str, value: float):
        """Record *value* of *resource* against the replica's pod."""
        self.get_resource_utilization(replica).put_resource(resource, value)

    def remove_resource(self, replica: FunctionReplica, resource: str, value: float):
        """Release *value* of *resource* from the replica's pod."""
        self.get_resource_utilization(replica).remove_resource(resource, value)

    def get_resource_utilization(self, replica: FunctionReplica) -> ResourceUtilization:
        """Fetch (creating on first use) the utilization bucket for *replica*."""
        pod_name = replica.pod.name
        existing = self.__resources.get(pod_name)
        if existing is not None:
            return existing
        # First sighting of this pod: register it in both lookup tables.
        fresh = ResourceUtilization()
        self.__resources[pod_name] = fresh
        self.__replicas[pod_name] = replica
        return fresh

    def list_resource_utilization(self) -> List[Tuple[FunctionReplica, ResourceUtilization]]:
        """Return (replica, utilization) pairs for every tracked pod."""
        return [(self.__replicas.get(pod_name), utilization)
                for pod_name, utilization in self.__resources.items()]

    @property
    def total_utilization(self) -> ResourceUtilization:
        """Aggregate utilization across all pods on this node."""
        aggregate = ResourceUtilization()
        for _, utilization in self.list_resource_utilization():
            for resource, value in utilization.list_resources().items():
                aggregate.put_resource(resource, value)
        return aggregate
class ResourceState:
    """Cluster-wide registry mapping node names to their utilization records."""

    node_resource_utilizations: Dict[str, NodeResourceUtilization]

    def __init__(self):
        self.node_resource_utilizations = {}

    def put_resource(self, function_replica: FunctionReplica, resource: str, value: float):
        """Record `value` of `resource` for the replica on its hosting node."""
        node = self.get_node_resource_utilization(function_replica.node.name)
        node.put_resource(function_replica, resource, value)

    def remove_resource(self, replica: 'FunctionReplica', resource: str, value: float):
        """Remove `value` of `resource` for the replica on its hosting node."""
        node = self.get_node_resource_utilization(replica.node.name)
        node.remove_resource(replica, resource, value)

    def get_resource_utilization(self, replica: 'FunctionReplica') -> 'ResourceUtilization':
        """Return the utilization record of `replica` on its hosting node."""
        node = self.get_node_resource_utilization(replica.node.name)
        return node.get_resource_utilization(replica)

    def list_resource_utilization(self, node_name: str) -> List[Tuple['FunctionReplica', 'ResourceUtilization']]:
        """Return (replica, utilization) pairs for every pod on `node_name`."""
        return self.get_node_resource_utilization(node_name).list_resource_utilization()

    def get_node_resource_utilization(self, node_name: str) -> Optional[NodeResourceUtilization]:
        """Return the record for `node_name`, creating an empty one on first access."""
        if node_name not in self.node_resource_utilizations:
            self.node_resource_utilizations[node_name] = NodeResourceUtilization()
        return self.node_resource_utilizations[node_name]
@dataclass
class ResourceWindow:
    """A point-in-time snapshot of one replica's resource usage."""
    replica: FunctionReplica
    resources: Dict[str, float]  # resource name -> measured value
    time: float  # simulation timestamp at which the snapshot was taken
class MetricsServer:
    """
    contains methods to obtain metrics - offers query functions for resources (functionreplica)
    stores time-series data in data structure (i.e. list)
    """

    def __init__(self):
        # TODO this will inevitably leak memory
        # node name -> pod name -> chronologically appended ResourceWindows
        self._windows = defaultdict(lambda: defaultdict(list))

    # TODO make dynamic -> read key-values from replica/pod
    def put(self, window: 'ResourceWindow'):
        """Append a snapshot, indexed by the replica's node and pod name."""
        node = window.replica.node.name
        pod = window.replica.pod.name
        self._windows[node][pod].append(window)

    def get_average_cpu_utilization(self, fn_replica: 'FunctionReplica', window_start: float, window_end: float) -> float:
        """Average CPU usage in the interval, relative to the node's total CPU millis."""
        utilization = self.get_average_resource_utilization(fn_replica, 'cpu', window_start, window_end)
        millis = fn_replica.node.capacity.cpu_millis
        return utilization / millis

    def get_average_resource_utilization(self, fn_replica: 'FunctionReplica', resource: str, window_start: float,
                                         window_end: float) -> float:
        """Mean of `resource` over snapshots with window_start <= t <= window_end.

        Returns 0 when no snapshot exists for the replica, or when none falls
        inside the interval (previously np.mean([]) yielded NaN plus a
        RuntimeWarning in that case).
        """
        node = fn_replica.node.name
        pod = fn_replica.pod.name
        windows: List[ResourceWindow] = self._windows.get(node, {}).get(pod, [])
        if len(windows) == 0:
            return 0
        average_windows = []
        # windows are appended in time order, so walk backwards and stop as
        # soon as we fall before the interval
        for window in reversed(windows):
            if window.time <= window_end:
                if window.time < window_start:
                    break
                average_windows.append(window)
        if len(average_windows) == 0:
            # snapshots exist, but none inside [window_start, window_end]
            return 0
        return np.mean(list(map(lambda l: l.resources[resource], average_windows)))
class ResourceMonitor:
    """Simpy process - continuously collects resource data"""

    def __init__(self, env: Environment, reconcile_interval: int, logging=True):
        self.env = env
        self.reconcile_interval = reconcile_interval
        self.metric_server: MetricsServer = env.metrics_server
        self.logging = logging

    def run(self):
        """Generator driven by simpy: every interval, snapshot the utilization
        of all running replicas into the metrics server (and optionally log it)."""
        faas: FaasSystem = self.env.faas
        while True:
            yield self.env.timeout(self.reconcile_interval)
            now = self.env.now
            running = (
                replica
                for deployment in faas.get_deployments()
                for replica in faas.get_replicas(deployment.name, FunctionState.RUNNING)
            )
            for replica in running:
                utilization = self.env.resource_state.get_resource_utilization(replica)
                if utilization.is_empty():
                    # replica has not reported any resource usage yet
                    continue
                # TODO extract logging into own process
                if self.logging:
                    self.env.metrics.log_function_resource_utilization(replica, utilization)
                self.metric_server.put(ResourceWindow(replica, utilization.list_resources(), now))
| 2.546875 | 3 |
src/lib/nets/planar/res2net2d.py | charzharr/Hierarchical-Contrastive-Pretraining | 0 | 12761265 | """
Adapted from Res2Net (v1b) official git repo:
https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net_v1b.py
"""
from os import stat
import pathlib
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from lib.nets.basemodel import BaseModel
__all__ = ['Res2Net', 'res2net50_v1b', 'res2net101_v1b']
# Download locations for the pretrained ImageNet weights.
model_urls = {
    'res2net50_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net50_v1b_26w_4s-3cf99910.pth',
    'res2net101_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net101_v1b_26w_4s-0812c246.pth',
}

# Local cache of the downloaded parameter files.
# BUGFIX: this must be a pathlib.Path -- the original plain string made the
# `model_params_path / '...'` joins below raise TypeError at import time.
model_params_path = pathlib.Path('/afs/crc.nd.edu/user/y/yzhang46/_DLResources/Models/')
model_params = {
    'res2net50_v1b_26w_4s': str(model_params_path / 'res2net50_v1b_26w_4s.pth'),
    'res2net101_v1b_26w_4s': str(model_params_path / 'res2net101_v1b_26w_4s.pth'),
}
class Bottle2neck(nn.Module):
    """Res2Net bottleneck block: 1x1 reduce -> hierarchical 3x3 convs over
    `scale` channel groups -> 1x1 expand, with a residual connection."""

    # Channel expansion factor of the final 1x1 conv (standard bottleneck).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 baseWidth=26, scale=4, stype='normal'):
        """ Constructor
        Args:
            inplanes: input channel dimensionality
            planes: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            downsample: None when stride = 1
            baseWidth: basic width of conv3x3
            scale: number of scale.
            type: 'normal': normal set. 'stage': first block of a new stage.
        """
        super(Bottle2neck, self).__init__()

        # Per-group width; total channels after conv1 is width * scale.
        width = int(math.floor(planes * (baseWidth/64.0)))
        self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width*scale)

        # One 3x3 conv per group, except the last group which is passed
        # through untouched (or pooled when this is the first block of a stage).
        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale -1
        if stype == 'stage':
            # Downsamples the conv-free last group at stage boundaries so its
            # spatial size matches the strided groups.
            self.pool = nn.AvgPool2d(kernel_size=3, stride = stride, padding=1)
        convs = []
        bns = []
        for i in range(self.nums):
            convs.append(nn.Conv2d(width, width, kernel_size=3, stride = stride,
                                   padding=1, bias=False))
            bns.append(nn.BatchNorm2d(width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

        self.conv3 = nn.Conv2d(width*scale, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stype = stype
        self.scale = scale
        self.width = width

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        # Split channels into `scale` groups of `width` channels each.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            # Hierarchical connections: each group after the first receives the
            # previous group's conv output before its own 3x3 conv. At stage
            # boundaries the strides differ, so no cross-group addition is done.
            if i==0 or self.stype=='stage':
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i==0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # Re-append the last (unconvolved) group; at stage boundaries it is
        # average-pooled so its spatial size matches the strided groups.
        if self.scale != 1 and self.stype=='normal':
            out = torch.cat((out, spx[self.nums]),1)
        elif self.scale != 1 and self.stype=='stage':
            out = torch.cat((out, self.pool(spx[self.nums])),1)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class Res2Net(BaseModel):
    """Res2Net v1b backbone: deep 3-conv stem, four residual stages of
    Bottle2neck blocks, avg-pool shortcut downsampling, linear classifier."""

    def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000,
                 prelinear_dropout=0):
        """
        Args:
            block: residual block class (e.g. Bottle2neck).
            layers: number of blocks per stage, e.g. [3, 4, 6, 3] for 50 layers.
            baseWidth: base width of the per-group 3x3 convs in each block.
            scale: number of channel groups inside each block.
            num_classes: output dimension of the final linear layer.
            prelinear_dropout: dropout probability applied before the classifier.
        """
        self.num_classes = num_classes
        self.prelinear_dropout = prelinear_dropout
        self.inplanes = 64
        super(Res2Net, self).__init__()
        self.baseWidth = baseWidth
        self.scale = scale
        # v1b "deep stem": three 3x3 convs instead of a single 7x7 conv.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, 3, 2, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, 3, 1, 1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, 1, 1, bias=False)
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.classifier = self.fc # compatibility with linear eval

        # Kaiming init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # param_counts comes from BaseModel; presumably (total, trainable) --
        # TODO confirm against BaseModel's definition.
        tot_params, tot_tparams = self.param_counts
        print(f'💠 Res2Netv1b model initiated with n_classes={num_classes}, \n'
              f'   layers={layers}, base-width={baseWidth}, scale={scale}\n'
              f'   params={tot_params:,}, trainable_params={tot_tparams:,}.')

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks. The first block may
        downsample spatially (stride) and always adapts the channel width."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # v1b shortcut: avg-pool then 1x1 conv (instead of a strided 1x1).
            downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride,
                             ceil_mode=True, count_include_pad=False),
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride,
                            downsample=downsample, stype='stage',
                            baseWidth=self.baseWidth, scale=self.scale))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth,
                                scale=self.scale))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> 4 stages -> global avg-pool -> (dropout) -> linear logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.prelinear_dropout > 0:
            x = F.dropout(x, p=self.prelinear_dropout, training=self.training)
        x = self.fc(x)

        return x
def res2net50_v1b(pretrained=False, **kwargs):
    """Build a Res2Net-50_v1b (the 26w_4s configuration).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
    if pretrained:
        load_state_dict(net, 'res2net50_v1b_26w_4s')
    return net
def res2net101_v1b(pretrained=False, **kwargs):
    """Constructs a Res2Net-101_v1b (26w_4s) model.

    (Docstring fixed: it previously claimed to build Res2Net-50.)

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
    if pretrained:
        load_state_dict(model, 'res2net101_v1b_26w_4s')
    return model
def res2net50_v1b_26w_4s(pretrained=False, **kwargs):
    """Constructs a Res2Net-50_v1b_26w_4s model.

    Identical configuration to :func:`res2net50_v1b`; delegates to it so the
    two entry points cannot drift apart.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return res2net50_v1b(pretrained=pretrained, **kwargs)
def res2net101_v1b_26w_4s(pretrained=False, **kwargs):
    """Constructs a Res2Net-101_v1b_26w_4s model.

    Identical configuration to :func:`res2net101_v1b`; delegates to it so the
    two entry points cannot drift apart. (Docstring fixed: it previously
    claimed to build Res2Net-50.)

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    return res2net101_v1b(pretrained=pretrained, **kwargs)
def res2net152_v1b_26w_4s(pretrained=False, **kwargs):
    """Constructs a Res2Net-152_v1b_26w_4s model.

    (Docstring fixed: it previously claimed to build Res2Net-50.)

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
            NOTE(review): `model_params` has no 'res2net152_v1b_26w_4s'
            entry, so pretrained=True will fail until a weight file is added.
    """
    model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)
    if pretrained:
        load_state_dict(model, 'res2net152_v1b_26w_4s')
    return model
def load_state_dict(model, model_key):
    """Load cached pretrained ImageNet weights (see `model_params`) into `model`.

    If the model's head differs from the 1000-class ImageNet head, the fc
    parameters are dropped and loading proceeds non-strictly; the load result
    (missing/unexpected keys) is printed for inspection.
    """
    print(f' * Res2Net1b loading pretrained ImageNet weights.')
    state_dict = torch.load(model_params[model_key], map_location='cpu')
    if model.num_classes != 1000:
        # Head shape differs from ImageNet: discard the classifier weights.
        del state_dict['fc.weight']
        del state_dict['fc.bias']
    print(model.load_state_dict(state_dict, strict=False))
def get_model(layers, num_classes, pretrained=True, prelinear_dropout=0):
layers = int(layers)
if layers == 50:
model = res2net50_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
elif layers == 101:
model = res2net101_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
else:
raise ValueError(f'{layers} layers is not supported right now.')
return model
if __name__ == '__main__':
    # Smoke test: push one ImageNet-sized batch through the pretrained
    # 50-layer model on GPU 0 and print the output logits' shape.
    # Requires a CUDA device and the cached weight file.
    # (Fixed: the final line previously had dataset-extraction residue
    # appended, which was a syntax error.)
    images = torch.rand(1, 3, 224, 224).cuda(0)
    model = res2net50_v1b_26w_4s(pretrained=True)
    model = model.cuda(0)
    print(model(images).size())
utils/overlapadd_singlethread.py | haoheliu/2021-ISMIR-MSS-Challenge-CWS-PResUNet | 78 | 12761266 |
from itertools import permutations
import torch
from torch import nn
from scipy.optimize import linear_sum_assignment
import numpy as np
import torch.nn.functional as F
class PITLossWrapper(nn.Module):
    r"""Permutation invariant loss wrapper.

    Args:
        loss_func: function with signature (est_targets, targets, **kwargs).
        pit_from (str): Determines how PIT is applied.

            * ``'pw_mtx'`` (pairwise matrix): `loss_func` computes pairwise
              losses and returns a torch.Tensor of shape
              :math:`(batch, n\_src, n\_src)`. Each element
              :math:`(batch, i, j)` corresponds to the loss between
              :math:`targets[:, i]` and :math:`est\_targets[:, j]`
            * ``'pw_pt'`` (pairwise point): `loss_func` computes the loss for
              a batch of single source and single estimates (tensors won't
              have the source axis). Output shape : :math:`(batch)`.
              See :meth:`~PITLossWrapper.get_pw_losses`.
            * ``'perm_avg'`` (permutation average): `loss_func` computes the
              average loss for a given permutations of the sources and
              estimates. Output shape : :math:`(batch)`.
              See :meth:`~PITLossWrapper.best_perm_from_perm_avg_loss`.

            In terms of efficiency, ``'perm_avg'`` is the least efficient.

        perm_reduce (Callable): torch function to reduce permutation losses.
            Defaults to None (equivalent to mean). Signature of the func
            (pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) --> (B, n\_src!)`.
            `perm_reduce` can receive **kwargs during forward using the
            `reduce_kwargs` argument (dict). If those argument are static,
            consider defining a small function or using `functools.partial`.
            Only used in `'pw_mtx'` and `'pw_pt'` `pit_from` modes.

    For each of these modes, the best permutation and reordering will be
    automatically computed. When either ``'pw_mtx'`` or ``'pw_pt'`` is used,
    and the number of sources is larger than three, the hungarian algorithm is
    used to find the best permutation.

    Examples
        >>> import torch
        >>> from asteroid.losses import pairwise_neg_sisdr
        >>> sources = torch.randn(10, 3, 16000)
        >>> est_sources = torch.randn(10, 3, 16000)
        >>> # Compute PIT loss based on pairwise losses
        >>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
        >>> loss_val = loss_func(est_sources, sources)
        >>>
        >>> # Using reduce
        >>> def reduce(perm_loss, src):
        >>>     weighted = perm_loss * src.norm(dim=-1, keepdim=True)
        >>>     return torch.mean(weighted, dim=-1)
        >>>
        >>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx',
        >>>                            perm_reduce=reduce)
        >>> reduce_kwargs = {'src': sources}
        >>> loss_val = loss_func(est_sources, sources,
        >>>                      reduce_kwargs=reduce_kwargs)
    """

    def __init__(self, loss_func, pit_from="pw_mtx", perm_reduce=None):
        super().__init__()
        self.loss_func = loss_func
        self.pit_from = pit_from
        self.perm_reduce = perm_reduce
        # Validate early so forward() never sees an unknown mode.
        if self.pit_from not in ["pw_mtx", "pw_pt", "perm_avg"]:
            raise ValueError(
                "Unsupported loss function type for now. Expected"
                "one of [`pw_mtx`, `pw_pt`, `perm_avg`]"
            )

    def forward(self, est_targets, targets, return_est=False, reduce_kwargs=None, **kwargs):
        r"""Find the best permutation and return the loss.

        Args:
            est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
                The batch of target estimates.
            targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
                The batch of training targets
            return_est: Boolean. Whether to return the reordered targets
                estimates (To compute metrics or to save example).
            reduce_kwargs (dict or None): kwargs that will be passed to the
                pairwise losses reduce function (`perm_reduce`).
            **kwargs: additional keyword argument that will be passed to the
                loss function.

        Returns:
            - Best permutation loss for each batch sample, average over
              the batch.
            - The reordered targets estimates if ``return_est`` is True.
              :class:`torch.Tensor` of shape $(batch, nsrc, ...)$.
        """
        n_src = targets.shape[1]
        # assert n_src < 10, f"Expected source axis along dim 1, found {n_src}"
        if self.pit_from == "pw_mtx":
            # Loss function already returns pairwise losses
            pw_losses = self.loss_func(est_targets, targets, **kwargs)
        elif self.pit_from == "pw_pt":
            # Compute pairwise losses with a for loop.
            pw_losses = self.get_pw_losses(self.loss_func, est_targets, targets, **kwargs)
        elif self.pit_from == "perm_avg":
            # Cannot get pairwise losses from this type of loss.
            # Find best permutation directly.
            min_loss, batch_indices = self.best_perm_from_perm_avg_loss(
                self.loss_func, est_targets, targets, **kwargs
            )
            # Take the mean over the batch
            mean_loss = torch.mean(min_loss)
            if not return_est:
                return mean_loss
            reordered = self.reorder_source(est_targets, batch_indices)
            return mean_loss, reordered
        else:
            # Defensive: unreachable because __init__ validates `pit_from`.
            return

        assert pw_losses.ndim == 3, (
            "Something went wrong with the loss " "function, please read the docs."
        )
        assert pw_losses.shape[0] == targets.shape[0], "PIT loss needs same batch dim as input"

        reduce_kwargs = reduce_kwargs if reduce_kwargs is not None else dict()
        min_loss, batch_indices = self.find_best_perm(
            pw_losses, perm_reduce=self.perm_reduce, **reduce_kwargs
        )
        mean_loss = torch.mean(min_loss)
        if not return_est:
            return mean_loss
        reordered = self.reorder_source(est_targets, batch_indices)
        return mean_loss, reordered

    @staticmethod
    def get_pw_losses(loss_func, est_targets, targets, **kwargs):
        r"""Get pair-wise losses between the training targets and its estimate
        for a given loss function.

        Args:
            loss_func: function with signature (est_targets, targets, **kwargs)
                The loss function to get pair-wise losses from.
            est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
                The batch of target estimates.
            targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
                The batch of training targets.
            **kwargs: additional keyword argument that will be passed to the
                loss function.

        Returns:
            torch.Tensor or size $(batch, nsrc, nsrc)$, losses computed for
            all permutations of the targets and est_targets.

        This function can be called on a loss function which returns a tensor
        of size :math:`(batch)`. There are more efficient ways to compute pair-wise
        losses using broadcasting.
        """
        batch_size, n_src, *_ = targets.shape
        pair_wise_losses = targets.new_empty(batch_size, n_src, n_src)
        # O(n_src^2) calls to loss_func: one per (estimate, target) pair.
        for est_idx, est_src in enumerate(est_targets.transpose(0, 1)):
            for target_idx, target_src in enumerate(targets.transpose(0, 1)):
                pair_wise_losses[:, est_idx, target_idx] = loss_func(est_src, target_src, **kwargs)
        return pair_wise_losses

    @staticmethod
    def best_perm_from_perm_avg_loss(loss_func, est_targets, targets, **kwargs):
        r"""Find best permutation from loss function with source axis.

        Args:
            loss_func: function with signature $(est_targets, targets, **kwargs)$
                The loss function batch losses from.
            est_targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
                The batch of target estimates.
            targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
                The batch of training targets.
            **kwargs: additional keyword argument that will be passed to the
                loss function.

        Returns:
            - :class:`torch.Tensor`:
              The loss corresponding to the best permutation of size $(batch,)$.
            - :class:`torch.Tensor`:
              The indices of the best permutations.
        """
        n_src = targets.shape[1]
        # Evaluate the loss under every permutation of the estimates (n_src!).
        perms = torch.tensor(list(permutations(range(n_src))), dtype=torch.long)
        loss_set = torch.stack(
            [loss_func(est_targets[:, perm], targets, **kwargs) for perm in perms], dim=1
        )
        # Indexes and values of min losses for each batch element
        min_loss, min_loss_idx = torch.min(loss_set, dim=1)
        # Permutation indices for each batch.
        batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
        return min_loss, batch_indices

    @staticmethod
    def find_best_perm(pair_wise_losses, perm_reduce=None, **kwargs):
        r"""Find the best permutation, given the pair-wise losses.

        Dispatch between factorial method if number of sources is small (<3)
        and hungarian method for more sources. If ``perm_reduce`` is not None,
        the factorial method is always used.

        Args:
            pair_wise_losses (:class:`torch.Tensor`):
                Tensor of shape :math:`(batch, n\_src, n\_src)`. Pairwise losses.
            perm_reduce (Callable): torch function to reduce permutation losses.
                Defaults to None (equivalent to mean). Signature of the func
                (pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
            **kwargs: additional keyword argument that will be passed to the
                permutation reduce function.

        Returns:
            - :class:`torch.Tensor`:
              The loss corresponding to the best permutation of size $(batch,)$.
            - :class:`torch.Tensor`:
              The indices of the best permutations.
        """
        n_src = pair_wise_losses.shape[-1]
        # Custom reduces require evaluating all permutations, so the factorial
        # path is forced in that case; otherwise Hungarian scales for n_src > 3.
        if perm_reduce is not None or n_src <= 3:
            min_loss, batch_indices = PITLossWrapper.find_best_perm_factorial(
                pair_wise_losses, perm_reduce=perm_reduce, **kwargs
            )
        else:
            min_loss, batch_indices = PITLossWrapper.find_best_perm_hungarian(pair_wise_losses)
        return min_loss, batch_indices

    @staticmethod
    def reorder_source(source, batch_indices):
        r"""Reorder sources according to the best permutation.

        Args:
            source (torch.Tensor): Tensor of shape :math:`(batch, n_src, time)`
            batch_indices (torch.Tensor): Tensor of shape :math:`(batch, n_src)`.
                Contains optimal permutation indices for each batch.

        Returns:
            :class:`torch.Tensor`: Reordered sources.
        """
        reordered_sources = torch.stack(
            [torch.index_select(s, 0, b) for s, b in zip(source, batch_indices)]
        )
        return reordered_sources

    @staticmethod
    def find_best_perm_factorial(pair_wise_losses, perm_reduce=None, **kwargs):
        r"""Find the best permutation given the pair-wise losses by looping
        through all the permutations.

        Args:
            pair_wise_losses (:class:`torch.Tensor`):
                Tensor of shape :math:`(batch, n_src, n_src)`. Pairwise losses.
            perm_reduce (Callable): torch function to reduce permutation losses.
                Defaults to None (equivalent to mean). Signature of the func
                (pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
            **kwargs: additional keyword argument that will be passed to the
                permutation reduce function.

        Returns:
            - :class:`torch.Tensor`:
              The loss corresponding to the best permutation of size $(batch,)$.
            - :class:`torch.Tensor`:
              The indices of the best permutations.

        MIT Copyright (c) 2018 <NAME>.
        See `Original code
        <https://github.com/kaituoxu/Conv-TasNet/blob/master>`__ and `License
        <https://github.com/kaituoxu/Conv-TasNet/blob/master/LICENSE>`__.
        """
        n_src = pair_wise_losses.shape[-1]
        # After transposition, dim 1 corresp. to sources and dim 2 to estimates
        pwl = pair_wise_losses.transpose(-1, -2)
        perms = pwl.new_tensor(list(permutations(range(n_src))), dtype=torch.long)
        # Column permutation indices
        idx = torch.unsqueeze(perms, 2)
        # Loss mean of each permutation
        if perm_reduce is None:
            # one-hot, [n_src!, n_src, n_src]
            perms_one_hot = pwl.new_zeros((*perms.size(), n_src)).scatter_(2, idx, 1)
            # Sum the pairwise losses selected by each permutation matrix.
            loss_set = torch.einsum("bij,pij->bp", [pwl, perms_one_hot])
            loss_set /= n_src
        else:
            # batch = pwl.shape[0]; n_perm = idx.shape[0]
            # [batch, n_src!, n_src] : Pairwise losses for each permutation.
            pwl_set = pwl[:, torch.arange(n_src), idx.squeeze(-1)]
            # Apply reduce [batch, n_src!, n_src] --> [batch, n_src!]
            loss_set = perm_reduce(pwl_set, **kwargs)
        # Indexes and values of min losses for each batch element
        min_loss, min_loss_idx = torch.min(loss_set, dim=1)
        # Permutation indices for each batch.
        batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
        return min_loss, batch_indices

    @staticmethod
    def find_best_perm_hungarian(pair_wise_losses: torch.Tensor):
        """
        Find the best permutation given the pair-wise losses, using the Hungarian algorithm.

        Returns:
            - :class:`torch.Tensor`:
              The loss corresponding to the best permutation of size (batch,).
            - :class:`torch.Tensor`:
              The indices of the best permutations.
        """
        # After transposition, dim 1 corresp. to sources and dim 2 to estimates
        pwl = pair_wise_losses.transpose(-1, -2)
        # Just bring the numbers to cpu(), not the graph
        pwl_copy = pwl.detach().cpu()
        # Loop over batch + row indices are always ordered for square matrices.
        batch_indices = torch.tensor([linear_sum_assignment(pwl)[1] for pwl in pwl_copy]).to(
            pwl.device
        )
        min_loss = torch.gather(pwl, 2, batch_indices[..., None]).mean([-1, -2])
        return min_loss, batch_indices
class PITReorder(PITLossWrapper):
    """Permutation invariant reorderer. Only returns the reordered estimates.
    See `:py:class:asteroid.losses.PITLossWrapper`."""

    def forward(self, est_targets, targets, reduce_kwargs=None, **kwargs):
        # Run the wrapper with return_est=True and keep only the reordered
        # estimates, discarding the loss value.
        outputs = super().forward(
            est_targets=est_targets,
            targets=targets,
            return_est=True,
            reduce_kwargs=reduce_kwargs,
            **kwargs,
        )
        return outputs[1]
class LambdaOverlapAdd(torch.nn.Module):
"""Overlap-add with lambda transform on segments (not scriptable).
Segment input signal, apply lambda function (a neural network for example)
and combine with OLA.
`LambdaOverlapAdd` can be used with :mod:`asteroid.separate` and the
`asteroid-infer` CLI.
Args:
nnet (callable): Function to apply to each segment.
n_src (Optional[int]): Number of sources in the output of nnet.
If None, the number of sources is determined by the network's output,
but some correctness checks cannot be performed.
window_size (int): Size of segmenting window.
hop_size (int): Segmentation hop size.
window (str): Name of the window (see scipy.signal.get_window) used
for the synthesis.
reorder_chunks (bool): Whether to reorder each consecutive segment.
This might be useful when `nnet` is permutation invariant, as
source assignements might change output channel from one segment
to the next (in classic speech separation for example).
Reordering is performed based on the correlation between
the overlapped part of consecutive segment.
Examples
>>> from asteroid import ConvTasNet
>>> nnet = ConvTasNet(n_src=2)
>>> continuous_nnet = LambdaOverlapAdd(
>>> nnet=nnet,
>>> n_src=2,
>>> window_size=64000,
>>> hop_size=None, >>> window="hanning",
>>> reorder_chunks=True,
>>> enable_grad=False,
>>> )
>>> # Process wav tensor:
>>> wav = torch.randn(1, 1, 500000)
>>> out_wavs = continuous_nnet.forward(wav)
>>> # asteroid.separate.Separatable support:
>>> from asteroid.separate import file_separate
>>> file_separate(continuous_nnet, "example.wav")
"""
def __init__(
self,
nnet,
n_src,
window_size,
in_margin,
window="hanning",
reorder_chunks=True,
enable_grad=False,
):
super().__init__()
assert window_size % 2 == 0, "Window size must be even"
self.nnet = nnet
self.window_size = window_size
self.hop_size = window_size
self.n_src = n_src
self.in_channels = getattr(nnet, "in_channels", None)
self.in_margin = in_margin
if window:
from scipy.signal import get_window # for torch.hub
window = get_window(window, self.window_size).astype("float32")
window = torch.from_numpy(window)
self.use_window = True
else:
self.use_window = False
self.register_buffer("window", window.type_as(nnet.f_helper.stft.conv_real.weight))
self.reorder_chunks = reorder_chunks
self.enable_grad = enable_grad
def ola_forward(self, x, key='wav'):
"""Heart of the class: segment signal, apply func, combine with OLA."""
"""
x: [batchsize, channels, samples]
"""
assert x.ndim == 3
batch, channels, n_frames = x.size()
# Overlap and add:
# [batch, chans, n_frames] -> [batch, chans, win_size, n_chunks]
# ================================================================================================
def calc_L(outputsize, padding, dilation, kernel_size, stride):
return int((outputsize+2*padding-dilation*(kernel_size-1)-1)/stride+1)
# Pad signal
last_frame_samples = n_frames - int(n_frames/self.window_size) * self.window_size
if(last_frame_samples != 0):
x = F.pad(x,(0,self.window_size-last_frame_samples))
unfolded = F.unfold(
x.unsqueeze(-1),
kernel_size=(self.window_size+self.in_margin, 1),
padding=(self.in_margin, 0),
stride=(self.hop_size, 1),
)
out = []
n_chunks = unfolded.shape[-1]
######################################################################
# unfolded = unfolded.view(batch, self.window_size, channels, n_chunks) # Wrong!!!
unfolded = unfolded.view(batch, channels, self.window_size+self.in_margin, n_chunks) # Split channel out !
margin = torch.zeros(size=(batch,channels,self.in_margin,n_chunks)).type_as(unfolded)
margin[...,:-1] = unfolded[...,self.in_margin:self.in_margin*2,1:]
unfolded = torch.cat([unfolded,margin],dim=2)
# unfolded = unfolded.permute(0,2,1,3) # convert to the shape of the model input
######################################################################
for frame_idx in range(n_chunks): # for loop to spare memory
# print(unfolded[..., frame_idx].size())
if(frame_idx == 0):
frame = self.nnet(unfolded[..., frame_idx][...,self.in_margin:])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, :-self.in_margin]
elif(frame_idx == n_chunks-1 and last_frame_samples != 0):
frame = self.nnet(unfolded[..., frame_idx][...,:self.in_margin+last_frame_samples])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, self.in_margin:]
frame = F.pad(frame,(0,self.window_size-last_frame_samples))
elif(frame_idx == n_chunks-1 and last_frame_samples == 0):
frame = self.nnet(unfolded[..., frame_idx][...,:-self.in_margin])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, self.in_margin:]
else:
frame = self.nnet(unfolded[..., frame_idx])
# x_out = self.nnet(x[:,:,int(frame_idx*self.window_size)-self.in_margin:int((frame_idx+1)*self.window_size)+self.in_margin])
# print("out",torch.sum(x_out['wav']-frame['wav']))
######################################################################
# frame = frame['wav'].permute(0,2,1) # convert to what the following code needs
frame = frame[key] # convert to what the following code needs
frame = frame[:,:,self.in_margin:-self.in_margin]
# print(torch.sum(unfolded[..., frame_idx]-x[:,:,int(frame_idx*self.window_size)-self.in_margin:int((frame_idx+1)*self.window_size)+self.in_margin]))
######################################################################
# user must handle multichannel by reshaping to batch
if frame_idx == 0:
assert frame.ndim == 3, "nnet should return (batch, n_src, time)"
if self.n_src is not None:
assert frame.shape[1] == self.n_src, "nnet should return (batch, n_src, time)"
n_src = frame.shape[1]
frame = frame.reshape(batch * n_src, -1)
if frame_idx != 0 and self.reorder_chunks:
# we determine best perm based on xcorr with previous sources
frame = _reorder_sources(frame, out[-1], n_src, self.window_size, self.hop_size)
if self.use_window:
frame = frame * self.window
else:
frame = frame / (self.window_size / self.hop_size)
out.append(frame)
out = torch.stack(out).reshape(n_chunks, batch * n_src, self.window_size)
out = out.permute(1, 2, 0)
L = calc_L(outputsize=out.size()[-1]*out.size()[-2],padding=0,dilation=1,kernel_size=self.window_size,stride=self.hop_size)
out = out[...,:L]
out = torch.nn.functional.fold(
out,
(out.size()[-1]*out.size()[-2], 1),
kernel_size=(self.window_size, 1),
padding=(0, 0),
stride=(self.hop_size, 1),
)
out = out.squeeze(-1).reshape(batch, n_src, -1)
out = out[...,:n_frames]
return out
def forward(self, x, type: str, key='wav'):
    """Segment the signal, run the wrapped nnet on each chunk, and recombine with OLA.

    Args:
        x (:class:`torch.Tensor`): waveform signal of shape (batch, channels, time).
        type (str): unused here; kept for call-site compatibility.
        key (str): dict key selecting which nnet output to overlap-add.

    Returns:
        :class:`torch.Tensor`: The output of the lambda OLA.
    """
    # Gradient tracking is toggled by the module's configuration flag.
    with torch.autograd.set_grad_enabled(self.enable_grad):
        return self.ola_forward(x, key=key)
# Implement `asteroid.separate.Separatable` (separation support)
@property
def sample_rate(self):
    """Sample rate of the wrapped model (delegates to ``self.nnet``)."""
    return self.nnet.sample_rate
def _separate(self, wav, *args, **kwargs):
    """Separation entry point required by `Separatable`; forwards to :meth:`forward`."""
    return self.forward(wav, *args, **kwargs)
def _reorder_sources(
    current: torch.FloatTensor,
    previous: torch.FloatTensor,
    n_src: int,
    window_size: int,
    hop_size: int,
):
    """
    Reorder sources in current chunk to maximize correlation with previous chunk.
    Used for Continuous Source Separation. Standard dsp correlation is used
    for reordering.

    Args:
        current (:class:`torch.Tensor`): current chunk, tensor
            of shape (batch, n_src, window_size)
        previous (:class:`torch.Tensor`): previous chunk, tensor
            of shape (batch, n_src, window_size)
        n_src (:class:`int`): number of sources.
        window_size (:class:`int`): window_size, equal to last dimension of
            both current and previous.
        hop_size (:class:`int`): hop_size between current and previous tensors.
    """
    # NOTE(review): despite the docstring, the caller passes tensors already
    # flattened to (batch * n_src, frames); `batch` below is batch * n_src.
    batch, frames = current.size()
    current = current.reshape(-1, n_src, frames)
    previous = previous.reshape(-1, n_src, frames)
    # Number of frames shared by two consecutive chunks.
    overlap_f = window_size - hop_size

    def reorder_func(x, y):
        # Compare only the overlapping region: head of the current chunk
        # against the tail of the previous chunk.
        x = x[..., :overlap_f]
        y = y[..., -overlap_f:]
        # Mean normalization
        x = x - x.mean(-1, keepdim=True)
        y = y - y.mean(-1, keepdim=True)
        # Negative mean Correlation
        return -torch.sum(x.unsqueeze(1) * y.unsqueeze(2), dim=-1)

    # We maximize correlation-like between previous and current.
    # NOTE(review): PITReorder is assumed to pick the permutation minimizing
    # reorder_func — confirm against its definition elsewhere in the project.
    pit = PITReorder(reorder_func)
    current = pit(current, previous)
    return current.reshape(batch, frames)
class DualPathProcessing(nn.Module):
    """
    Perform Dual-Path processing via overlap-add as in DPRNN [1].

    Args:
        chunk_size (int): Size of segmenting window.
        hop_size (int): segmentation hop size.

    References
        [1] <NAME>, <NAME> and <NAME>. "Dual-path RNN: efficient
        long sequence modeling for time-domain single-channel speech separation"
        https://arxiv.org/abs/1910.06379
    """

    def __init__(self, chunk_size, hop_size):
        super(DualPathProcessing, self).__init__()
        self.chunk_size = chunk_size
        self.hop_size = hop_size
        # Cached by `unfold`, used by `fold` when no output_size is given.
        self.n_orig_frames = None

    def unfold(self, x):
        r"""
        Unfold the feature tensor from $(batch, channels, time)$ to
        $(batch, channels, chunksize, nchunks)$.

        Args:
            x (:class:`torch.Tensor`): feature tensor of shape $(batch, channels, time)$.

        Returns:
            :class:`torch.Tensor`: spliced feature tensor of shape
            $(batch, channels, chunksize, nchunks)$.
        """
        # x is (batch, chan, frames)
        batch, chan, frames = x.size()
        assert x.ndim == 3
        self.n_orig_frames = x.shape[-1]
        # Pad by a full chunk on both sides so every original frame is covered
        # by chunk_size / hop_size windows (when hop_size divides chunk_size),
        # which makes `fold` an exact inverse after the gain compensation.
        unfolded = torch.nn.functional.unfold(
            x.unsqueeze(-1),
            kernel_size=(self.chunk_size, 1),
            padding=(self.chunk_size, 0),
            stride=(self.hop_size, 1),
        )
        return unfolded.reshape(
            batch, chan, self.chunk_size, -1
        )  # (batch, chan, chunk_size, n_chunks)

    def fold(self, x, output_size=None):
        r"""
        Folds back the spliced feature tensor.
        Input shape $(batch, channels, chunksize, nchunks)$ to original shape
        $(batch, channels, time)$ using overlap-add.

        Args:
            x (:class:`torch.Tensor`): spliced feature tensor of shape
                $(batch, channels, chunksize, nchunks)$.
            output_size (int, optional): sequence length of original feature tensor.
                If None, the original length cached by the previous call of
                :meth:`unfold` will be used.

        Returns:
            :class:`torch.Tensor`: feature tensor of shape $(batch, channels, time)$.

        .. note:: `fold` caches the original length of the input.
        """
        output_size = output_size if output_size is not None else self.n_orig_frames
        # x is (batch, chan, chunk_size, n_chunks)
        batch, chan, chunk_size, n_chunks = x.size()
        to_unfold = x.reshape(batch, chan * self.chunk_size, n_chunks)
        x = torch.nn.functional.fold(
            to_unfold,
            (output_size, 1),
            kernel_size=(self.chunk_size, 1),
            padding=(self.chunk_size, 0),
            stride=(self.hop_size, 1),
        )
        # Compensate the overlap-add gain; force float div for torch jit.
        x /= float(self.chunk_size) / self.hop_size
        # Bug fix: reshape to the requested output_size, not the cached
        # n_orig_frames — they differ when an explicit output_size is passed.
        return x.reshape(batch, chan, output_size)

    @staticmethod
    def intra_process(x, module):
        r"""Performs intra-chunk processing.

        Args:
            x (:class:`torch.Tensor`): spliced feature tensor of shape
                (batch, channels, chunk_size, n_chunks).
            module (:class:`torch.nn.Module`): module one wish to apply to each chunk
                of the spliced feature tensor.

        Returns:
            :class:`torch.Tensor`: processed spliced feature tensor of shape
            $(batch, channels, chunksize, nchunks)$.

        .. note:: the module should have the channel first convention and accept
            a 3D tensor of shape $(batch, channels, time)$.
        """
        # x is (batch, channels, chunk_size, n_chunks)
        batch, channels, chunk_size, n_chunks = x.size()
        # we reshape to batch*n_chunks, channels, chunk_size so the module
        # runs over each chunk independently
        x = x.transpose(1, -1).reshape(batch * n_chunks, chunk_size, channels).transpose(1, -1)
        x = module(x)
        x = x.reshape(batch, n_chunks, channels, chunk_size).transpose(1, -1).transpose(1, 2)
        return x

    @staticmethod
    def inter_process(x, module):
        r"""Performs inter-chunk processing.

        Args:
            x (:class:`torch.Tensor`): spliced feature tensor of shape
                $(batch, channels, chunksize, nchunks)$.
            module (:class:`torch.nn.Module`): module one wish to apply between
                each chunk of the spliced feature tensor.

        Returns:
            x (:class:`torch.Tensor`): processed spliced feature tensor of shape
            $(batch, channels, chunksize, nchunks)$.

        .. note:: the module should have the channel first convention and accept
            a 3D tensor of shape $(batch, channels, time)$.
        """
        batch, channels, chunk_size, n_chunks = x.size()
        # Flatten chunk positions into the batch so the module sees the
        # sequence of chunks along the time axis.
        x = x.transpose(1, 2).reshape(batch * chunk_size, channels, n_chunks)
        x = module(x)
        x = x.reshape(batch, chunk_size, channels, n_chunks).transpose(1, 2)
        return x
| 2.609375 | 3 |
scripts/transform_corpus.py | sunlightlabs/fcc-net-neutrality-comments | 18 | 12761267 | <gh_stars>10-100
# coding: utf-8
# Batch script (exported from a notebook — the "In[n]" markers are cell
# numbers): loads a previously-serialized gensim corpus, fits a TF-IDF
# model on it, and persists both the model and the transformed corpus.

# In[1]:
import sys
import os
# Make the project root importable so `settings` resolves.
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir))

# In[2]:
import logging
logging.basicConfig(filename='log/transform_corpus.log', format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# In[3]:
import settings
#reload(settings)

# In[7]:
from gensim import corpora
from gensim.corpora import dictionary
from gensim import models

# Optional CLI argument: suffix distinguishing corpus variants on disk.
if len(sys.argv) > 1:
    fname_suffix = sys.argv[1]
else:
    fname_suffix = ''

# In[6]:
corpus_fname = 'corpus' + fname_suffix + '.mm'
tfidf_corpus_fname = 'tfidf_corpus' + fname_suffix + '.mm'
# Dictionary and corpus were serialized by an earlier pipeline step.
my_dict = dictionary.Dictionary.load(os.path.join(settings.PERSIST_DIR, 'my_dict'))
corpus = corpora.MmCorpus(os.path.join(settings.PERSIST_DIR, corpus_fname))

# In[8]:
tfidf = models.TfidfModel(corpus)

# In[10]:
# Applying the model to the corpus is lazy; serialization below realizes it.
tfidf_corpus = tfidf[corpus]
tfidf.save(os.path.join(settings.PERSIST_DIR, 'tfidf_model' + fname_suffix ))

# In[11]:
corpora.MmCorpus.serialize(os.path.join(settings.PERSIST_DIR, tfidf_corpus_fname),
                           tfidf_corpus)
| 2.140625 | 2 |
src/Entities/Coupon.py | Tygovanommen/relational-database-data-import | 0 | 12761268 | <reponame>Tygovanommen/relational-database-data-import
# Module-level cache for the 'IMPORTED' coupon-type id (one DB round-trip
# per process).
couponTypeId = None


def getImportCouponTypeId(database):
    """Return the id of the 'IMPORTED' coupon type, creating the row if needed.

    Bug fix: the cache variable was checked but never assigned (and never
    declared ``global``), so every call hit the database. The id is now
    cached after the first lookup/insert.
    """
    global couponTypeId
    if couponTypeId is not None:
        return couponTypeId

    with database.cursor as cursor:
        query = "SELECT id FROM coupon_type WHERE type_name = 'IMPORTED'"
        for row in cursor.execute(query):
            couponTypeId = row.id
            return couponTypeId

        query = """INSERT INTO coupon_type(type_name, type_description)
OUTPUT inserted.id
VALUES('IMPORTED', 'IMPORTED')"""
        couponTypeId = cursor.execute(query).fetchone()[0]
        return couponTypeId
# Cache of upper-cased coupon name -> coupon id.
couponDict = {}


def createCouponIfNotExists(database, couponName):
    """Return the id for *couponName*, inserting a stub row if absent.

    Names are treated case-insensitively (upper-cased) and resolved ids are
    cached in the module-level ``couponDict``.
    """
    key = couponName.upper()
    if key in couponDict:
        return couponDict[key]

    with database.cursor as cursor:
        select_query = 'SELECT id FROM coupon WHERE name = ?'
        for row in cursor.execute(select_query, [key]):
            couponDict[key] = row.id
            return row.id

        insert_query = '''INSERT INTO coupon(coupon_type_id, name, discription, coupon_nr, date_from, date_to)
OUTPUT inserted.id
VALUES(?, ?, '', -1, GETDATE(), GETDATE())'''
        new_id = cursor.execute(insert_query, [getImportCouponTypeId(database), key]).fetchone()[0]
        couponDict[key] = new_id
        return new_id
application/db_insert.py | andrew749/andrew749.github.io | 0 | 12761269 | import sqlite3
import json
import os
import db_constants
import frontmatter
from datetime import datetime
def create_projects_table_query(table_name, title, subtitle, content, image_path):
    """Build the CREATE TABLE statement for the projects table.

    Bug fix: the template uses named placeholders ({table_name}, ...) but was
    filled with positional ``format`` arguments, which raised KeyError on
    every call. Arguments are now passed by keyword.
    """
    return ('CREATE TABLE IF NOT EXISTS {table_name}'
            ' ({title} TEXT, {subtitle} TEXT,'
            ' {content} TEXT, {image_path} TEXT);').format(
        table_name=table_name, title=title, subtitle=subtitle,
        content=content, image_path=image_path)
def create_blog_table_query(table_name, title, subtitle, content, date):
    """Build the CREATE TABLE statement for the blog table.

    Bug fix: same as :func:`create_projects_table_query` — named placeholders
    were filled positionally, raising KeyError; now passed by keyword.
    """
    return ('CREATE TABLE IF NOT EXISTS {table_name}'
            ' ({title} TEXT, {subtitle} TEXT,'
            ' {content} TEXT, {date} TEXT);').format(
        table_name=table_name, title=title, subtitle=subtitle,
        content=content, date=date)
insert_project_query = 'INSERT OR REPLACE INTO {table_name} (title,subtitle, content, image_path) VALUES (?, ?, ?, ?);'


def insert_project(cursor, title, subtitle, content, path):
    """Insert or replace one project row via the module-level query template."""
    sql = insert_project_query.format(table_name=db_constants.project_table)
    cursor.execute(sql, (title, subtitle, content, path))
insert_blog_query = 'INSERT OR REPLACE INTO {table_name} (title, subtitle, content, date) VALUES (?, ?, ?, ?);'


def insert_blog(cursor, title, subtitle, content, date):
    """Insert or replace one blog-post row via the module-level query template."""
    sql = insert_blog_query.format(table_name=db_constants.blog_table)
    cursor.execute(sql, (title, subtitle, content, date))
# Search for JSON files with content to render and parse them
def updateProjectDatabase(cursor):
    """Load every .json file in the content directory into the projects table.

    Bug fixes: the function is invoked as ``updateProjectDatabase(cursor)``
    in ``__main__`` but previously took no argument, and ``insert_project``
    was called without its required ``cursor`` parameter — both calls failed
    with TypeError. The cursor is now threaded through.
    """
    content_dir = os.path.join(os.getcwd(), db_constants.content_dir)
    for x in os.listdir(content_dir):
        if x.endswith('.json'):
            with open(os.path.join(content_dir, x)) as file:
                jsondata = json.loads(file.read())
                insert_project(
                    cursor,
                    jsondata['title'],
                    jsondata['subheading'],
                    jsondata['description'],
                    jsondata['url']
                )
def updateDBMarkdown(directory, function):
    """
    Read the front matter of every markdown file in *directory*, sort the
    posts newest-first by their 'date' field, and feed each to *function*.

    Args:
        directory: path (relative to cwd) containing .md files
        function: callable(title, subtitle, content, date) applied to each post
    """
    data_to_write = []
    for x in os.listdir(os.path.join(os.getcwd(), directory)):
        if x.endswith('.md'):
            with open(os.path.join(os.getcwd(), directory, x)) as file:
                # Bug fix: `getMarkdownFrontMatter` was undefined; the
                # imported `frontmatter` package parses the document directly
                # (post['key'] for metadata, post.content for the body).
                data = frontmatter.loads(file.read())
            data_to_write.append(data)
    # sort entries by date, newest first
    data_to_write = sorted(
        data_to_write,
        key=lambda d: datetime.strptime(d['date'], "%A %B %d, %Y"),
        reverse=True,
    )
    for data in data_to_write:
        print(data['date'])
        function(data['title'], data['subtitle'], data.content, data['date'])


# Specialized helper: load blog posts from the blog folder into the DB.
def updateBlogPosts(cursor):
    """Import all blog posts, writing them through *cursor*.

    Bug fix: `cursor` was previously dropped, so `insert_blog` was invoked
    with four arguments instead of five; it is now bound via a closure.
    """
    updateDBMarkdown(
        db_constants.blog_dir,
        lambda title, subtitle, content, date: insert_blog(cursor, title, subtitle, content, date),
    )
def createProjectsTable(cursor):
    """Create the projects table (no-op if it already exists)."""
    query = create_projects_table_query(
        table_name=db_constants.project_table,
        title=db_constants.project_title,
        subtitle=db_constants.project_subtitle,
        content=db_constants.project_content,
        image_path=db_constants.project_image,
    )
    return cursor.execute(query)
def createBlogTable(cursor):
    """Create the blog table (no-op if it already exists)."""
    query = create_blog_table_query(
        table_name=db_constants.blog_table,
        title=db_constants.blog_title,
        subtitle=db_constants.blog_subtitle,
        content=db_constants.blog_content,
        date=db_constants.blog_date,
    )
    return cursor.execute(query)
def createTablesIfNotExist(cursor):
    """Ensure both the projects and blog tables exist."""
    for create in (createProjectsTable, createBlogTable):
        create(cursor)
if __name__ == "__main__":
    # Open the content database, (re)create the tables, import project JSON
    # files and blog markdown posts, then persist everything.
    conn = sqlite3.connect(db_constants.content_path)
    cursor = conn.cursor()
    createTablesIfNotExist(cursor)
    updateProjectDatabase(cursor)
    updateBlogPosts(cursor)
    conn.commit()
    conn.close()
| 2.921875 | 3 |
AbletonLiveScripts/v10/ComradeEncoders/physical_display.py | lzref/ComradeEncoders | 1 | 12761270 | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/SL_MkIII/physical_display.py
# Compiled at: 2019-04-23 14:43:03
from __future__ import absolute_import, print_function, unicode_literals
from itertools import chain, ifilter, imap
from ableton.v2.base import first
from ableton.v2.control_surface.elements import PhysicalDisplayElement as PhysicalDisplayElementBase
from .sysex import TEXT_PROPERTY_BYTE
class PhysicalDisplayElement(PhysicalDisplayElementBase):
    """Display element that silently drops characters the device cannot render."""

    def _translate_string(self, string):
        """Translate *string*, keeping only chars present in the translation table.

        NOTE: decompiled Python 2 — `ifilter`/`map` return list-like results here.
        """
        return map(self._translate_char, ifilter(lambda c: c in self._translation_table, string))
class ConfigurablePhysicalDisplayElement(PhysicalDisplayElement):
    """Display element whose vertical position on the device is configurable."""

    def __init__(self, v_position=0, *a, **k):
        super(ConfigurablePhysicalDisplayElement, self).__init__(*a, **k)
        # Row index sent alongside TEXT_PROPERTY_BYTE in each sysex message.
        self._v_position = v_position

    def _build_display_message(self, display):
        """Concatenate one sysex fragment per logical segment of *display*."""

        def wrap_segment_message(segment):
            # Fragment layout: position id, text-property byte + row,
            # translated segment text, trailing 0 terminator.
            return chain(segment.position_identifier(), (
             TEXT_PROPERTY_BYTE, self._v_position), self._translate_string(unicode(segment).strip()), (0, ))

        return chain(*imap(wrap_segment_message, display._logical_segments))
class SpecialPhysicalDisplayElement(PhysicalDisplayElement):
    """Display element that skips sending messages whose text is all blank."""

    def _send_message(self):
        """Build (and cache) the outgoing message; send it unless it is blank."""
        if self._message_to_send is None:
            owners = map(first, self._central_resource.owners)
            self._message_to_send = self._build_message(owners)
        # Strip the fixed header/tail and inspect only the text payload.
        payload = self._message_to_send[len(self._message_header):-len(self._message_tail)]
        if not self._is_whitespace(payload):
            self.send_midi(self._message_to_send)

    def _is_whitespace(self, message):
        """True when every byte equals the device's translated space character."""
        space = self.ascii_translations[' ']
        return all(map(lambda c: c == space, message))
satisfactory_docker_ui/classes/savegame.py | hupebln/dockerized-satisfactory | 0 | 12761271 | <filename>satisfactory_docker_ui/classes/savegame.py
import struct
class SaveGameHeader(object):
    """Parser for the header section of a Satisfactory save game.

    NOTE(review): when neither or both inputs are given, ``valid_data`` is set
    to False but parsing still proceeds (and will raise on ``None`` bytes) —
    confirm whether callers check ``valid_data`` before constructing.
    """

    def __init__(self, save_bytes: bytes = None, save_file: str = None):
        """
        Class to extract the header of a Satisfactory save game.
        It takes either a bytes object or the path to the file.

        :param save_bytes: The bytes-like object of the save game.
        :type save_bytes: bytearray
        :param save_file: The path to the file.
        :type save_file: str
        """
        self.save_bytes: bytes = save_bytes
        self.save_file: str = save_file
        self.valid_data: bool = True
        self.error_message_data: list = []

        if not (self.save_bytes or self.save_file):
            self.valid_data = False
            self.error_message_data.append("neither save_bytes nor save_file given")

        if self.save_bytes and self.save_file:
            self.valid_data = False
            self.error_message_data.append("save_bytes and save_file given - only one is allowed")

        if self.save_file:
            self._load_file()

        # Sequential little-endian parse: each _get_* call consumes bytes
        # from the front of self.save_bytes, so the order below matters.
        self.header_version: int = self._get_integer()
        self.save_version: int = self._get_integer()
        self.build_version: int = self._get_integer()
        self.world_type: str = self._get_string()
        self.world_properties: str = self._get_string()
        self.session_name: str = self._get_string()
        self.play_time: int = self._get_integer()
        # 64-bit field ("q" = signed long long).
        self.save_date: int = self._get_integer(size=8, struct_format="q")
        # The remaining fields only exist from certain header versions on;
        # older saves get None.
        self.session_visibility: bool = self._get_byte() if self.header_version >= 5 else None
        self.editor_object_version: int = self._get_integer() if self.header_version >= 7 else None
        self.mod_metadata: str = self._get_string() if self.header_version >= 8 else None
        self.mod_flags: int = self._get_integer() if self.header_version >= 8 else None

    def _load_file(self):
        """Read the whole save file into ``self.save_bytes``."""
        with open(self.save_file, "rb") as save_file_stream:
            self.save_bytes = save_file_stream.read()

    def _get_integer(self, size=4, struct_order="<", struct_format="i"):
        """Consume *size* bytes and return them unpacked as an integer."""
        _raw = self.save_bytes[:size]
        _temp_value = struct.unpack(f"{struct_order}{struct_format}", _raw)
        self.save_bytes = self.save_bytes[size:]
        return _temp_value[0]

    def _get_string(self, struct_order="<", struct_format="s"):
        """Consume a length-prefixed, NUL-terminated string and return it.

        The 4-byte prefix counts the terminator, so length-1 characters are
        decoded while prefix + length bytes are consumed. A zero prefix
        yields the empty string.
        """
        _temp_value = ""
        _string_length = struct.unpack("<i", self.save_bytes[:4])[0]
        if _string_length:
            _raw = self.save_bytes[4:4 + _string_length - 1]
            _temp_value = struct.unpack(
                f"{struct_order}{_string_length-1}{struct_format}",
                _raw
            )
            _temp_value = _temp_value[0]
            # NOTE(review): decode() assumes UTF-8/ASCII payload — confirm
            # the save format never uses UTF-16 strings here.
            _temp_value = _temp_value.decode()
        self.save_bytes = self.save_bytes[4 + _string_length:]
        return _temp_value

    def _get_byte(self, size=1, struct_order="<", struct_format="?"):
        """Consume *size* bytes and return them unpacked as a bool."""
        _raw = self.save_bytes[:size]
        _temp_value = struct.unpack(f"{struct_order}{struct_format}", _raw)
        self.save_bytes = self.save_bytes[size:]
        return _temp_value[0]
| 3 | 3 |
build/lib/opengraph_parse/opengraph_parse.py | ben-jacobson/opengraph-parse | 3 | 12761272 | from bs4 import BeautifulSoup
import requests
# By no means is this a complete list, but it is very easy to search for the ones you need later.
KNOWN_OPENGRAPH_TAGS = [
    "og:site_name",
    "og:title",
    "og:locale",
    "og:type",
    "og:image",
    "og:url",
    "og:image:url",
    "og:image:secure_url",
    "og:image:type",
    "og:image:width",
    "og:image:height",
    "og:image:alt",
]


def parse_page(page_url, tags_to_search=KNOWN_OPENGRAPH_TAGS):
    '''
    Parses a page, returns a dictionary of all OpenGraph tags found on it.

    Passing in tags_to_search is optional. By default it searches the
    KNOWN_OPENGRAPH_TAGS constant, but for efficiency you may want to only
    search for 1 or 2 tags.

    Returns False if the page is unreadable (non-200 response).
    '''
    # read the html from the page
    response = requests.get(page_url)
    # Bug fix: `is not 200` compared object identity and only worked via
    # CPython's small-int cache (SyntaxWarning on 3.8+); use value inequality.
    if response.status_code != 200:
        return False

    # set up beautiful soup
    soup = BeautifulSoup(response.content, 'html.parser')

    # Collect every requested og: tag that is present on the page.
    found_tags = {}
    for og_tag in tags_to_search:
        new_found_tag = soup.find("meta", property=og_tag)
        if new_found_tag is not None:
            found_tags[new_found_tag["property"]] = new_found_tag["content"]
    return found_tags
| 3.234375 | 3 |
pybrdst/pybrdst.py | joaocarlosmendes/pybrdst | 0 | 12761273 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
class PyBrDST():
    """Brazilian daylight-saving-time date helper.

    DST starts on the third Sunday of October and ends on the third Sunday
    of February of the following year, postponed a week when it coincides
    with Carnival.
    """

    def easter_date(self, year):
        """Gregorian Easter Sunday for *year* (Meeus/Butcher algorithm)."""
        a = year % 19
        b, c = divmod(year, 100)
        d, e = divmod(b, 4)
        f = (b + 8) // 25
        g = (b - f + 1) // 3
        h = (19 * a + b - d - g + 15) % 30
        i, k = divmod(c, 4)
        L = (32 + 2 * e + 2 * i - h - k) % 7
        m = (a + 11 * h + 22 * L) // 451
        month, day = divmod(h + L - 7 * m + 114, 31)
        return datetime(year, month, day + 1)

    def carnival_date(self, easter_day):
        """Carnival Tuesday: 47 days before Easter Sunday."""
        return easter_day - timedelta(days=47)

    def begin_dst(self, year):
        """DST start: the third Sunday of October of *year*."""
        first_of_october = datetime(year, 10, 1)
        days_to_sunday = 6 - first_of_october.weekday()
        return first_of_october + timedelta(days=days_to_sunday + 14)

    def end_dst(self, year):
        """DST end: third Sunday of February, pushed a week on Carnival."""
        first_of_february = datetime(year, 2, 1)
        days_to_sunday = 6 - first_of_february.weekday()
        candidate = first_of_february + timedelta(days=days_to_sunday + 14)
        if self.carnival_date(self.easter_date(year)) == candidate:
            return candidate + timedelta(days=7)
        return candidate

    def get_dst(self, year):
        """(start, end): DST begins in *year* and ends in *year* + 1."""
        return (self.begin_dst(year), self.end_dst(year + 1))
| 3.453125 | 3 |
Python/FizzBuzz using list.py | veotani/Hacktoberfest-2020-FizzBuzz | 80 | 12761274 | <filename>Python/FizzBuzz using list.py
# Build the FizzBuzz sequence for 1..100, then print it space-separated.
a = []
for x in range(1, 101):
    # Bug fix: the combined multiples-of-15 case must be tested first;
    # previously it came after the x % 3 branch and was unreachable, so
    # "FizzBuzz" never appeared in the output.
    if x % 3 == 0 and x % 5 == 0:
        a.append("FizzBuzz")
    elif x % 3 == 0:
        a.append('Fizz')
    elif x % 5 == 0:
        a.append('Buzz')
    else:
        a.append(x)
print(*a)
| 3.71875 | 4 |
mean-variance/fastRNN.py | luddz/folk-music-similarity | 0 | 12761275 | <reponame>luddz/folk-music-similarity
import sys
import os
import time
import importlib
if sys.version_info < (3,0):
import cPickle as pickle
else:
import pickle
import numpy as np
import argparse
import pdb
import json
def throughRNN(seed=""):
    """Feed a space-separated token *seed* through a pre-trained 3-layer LSTM
    and return the list of per-step softmax distributions over the vocabulary.

    NOTE: Python 2 code (`iteritems`, pickle of a Py2 object); the weights
    come from a hard-coded pickle produced by a separate training run.
    """
    metadata_path = '../config5-wrepeats-20160112-222521.pkl'
    rng_seed = 42
    temperature =1.0
    ntunes = 1

    with open(metadata_path) as f:
        metadata = pickle.load(f)

    token2idx = metadata['token2idx']
    idx2token = dict((v, k) for k, v in token2idx.iteritems())
    vocab_size = len(token2idx)
    start_idx, end_idx = token2idx['<s>'], token2idx['</s>']

    rng = np.random.RandomState(rng_seed)
    vocab_idxs = np.arange(vocab_size)

    # Per-layer LSTM parameters: input/hidden weights and biases for the
    # input (i), forget (f), cell (c) and output (o) gates, plus the
    # initial cell/hidden states. htm1/ctm1 hold the running states.
    LSTM_Wxi=[]
    LSTM_Wxf=[]
    LSTM_Wxc=[]
    LSTM_Wxo=[]
    LSTM_Whi=[]
    LSTM_Whf=[]
    LSTM_Whc=[]
    LSTM_Who=[]
    LSTM_bi=[]
    LSTM_bf=[]
    LSTM_bc=[]
    LSTM_bo=[]
    LSTM_cell_init=[]
    LSTM_hid_init=[]
    htm1=[]
    ctm1=[]
    numlayers=3 # hard coded for now, but this should be saved in the model pickle
    # Each layer occupies 14 consecutive slots in metadata['param_values'].
    for jj in range(numlayers):
        LSTM_Wxi.append(metadata['param_values'][2+jj*14-1])
        LSTM_Whi.append(metadata['param_values'][3+jj*14-1])
        LSTM_bi.append(metadata['param_values'][4+jj*14-1])
        LSTM_Wxf.append(metadata['param_values'][5+jj*14-1])
        LSTM_Whf.append(metadata['param_values'][6+jj*14-1])
        LSTM_bf.append(metadata['param_values'][7+jj*14-1])
        LSTM_Wxc.append(metadata['param_values'][8+jj*14-1])
        LSTM_Whc.append(metadata['param_values'][9+jj*14-1])
        LSTM_bc.append(metadata['param_values'][10+jj*14-1])
        LSTM_Wxo.append(metadata['param_values'][11+jj*14-1])
        LSTM_Who.append(metadata['param_values'][12+jj*14-1])
        LSTM_bo.append(metadata['param_values'][13+jj*14-1])
        LSTM_cell_init.append(metadata['param_values'][14+jj*14-1])
        LSTM_hid_init.append(metadata['param_values'][15+jj*14-1])
        htm1.append(LSTM_hid_init[jj])
        ctm1.append(LSTM_cell_init[jj])
    # Final fully-connected projection to vocabulary logits.
    FC_output_W = metadata['param_values'][43];
    FC_output_b = metadata['param_values'][44];

    def sigmoid(x): return 1/(1 + np.exp(-x))

    def softmax(x,T):
        """Temperature softmax; falls back to an argmax one-hot on underflow."""
        expx=np.exp(x/T)
        sumexpx=np.sum(expx)
        if sumexpx==0:
            maxpos=x.argmax()
            x=np.zeros(x.shape, dtype=x.dtype)
            x[0][maxpos]=1
        else:
            x=expx/sumexpx
        return x # This is what we want to get!!!!

    sizeofx=LSTM_Wxi[0].shape[0]
    x = np.zeros(sizeofx, dtype=np.int8)

    # Converting the seed passed as an argument into a list of idx
    seed_sequence = [start_idx]
    if seed is not None:
        for token in seed.split(' '):
            seed_sequence.append(token2idx[token])

    matriciesWanted = []
    # initialise network: run the standard LSTM recurrence over the seed
    # (the last seed token is excluded — its distribution is not produced).
    for tok in seed_sequence[:-1]:
        # One-hot encode the current token.
        x = np.zeros(sizeofx, dtype=np.int8)
        x[tok] = 1;
        for jj in range(numlayers):
            # Standard LSTM cell: gates from current input and previous hidden.
            it=sigmoid(np.dot(x,LSTM_Wxi[jj]) + np.dot(htm1[jj],LSTM_Whi[jj]) + LSTM_bi[jj])
            ft=sigmoid(np.dot(x,LSTM_Wxf[jj]) + np.dot(htm1[jj],LSTM_Whf[jj]) + LSTM_bf[jj])
            ct=np.multiply(ft,ctm1[jj]) + np.multiply(it,np.tanh(np.dot(x,LSTM_Wxc[jj]) + np.dot(htm1[jj],LSTM_Whc[jj]) + LSTM_bc[jj]))
            ot=sigmoid(np.dot(x,LSTM_Wxo[jj]) + np.dot(htm1[jj],LSTM_Who[jj]) + LSTM_bo[jj])
            ht=np.multiply(ot,np.tanh(ct))
            # The layer's hidden state feeds the next layer as its input.
            x=ht
            ctm1[jj]=ct
            htm1[jj]=ht
        matrixWanted = softmax(np.dot(x,FC_output_W) + FC_output_b,temperature)
        matriciesWanted.append(matrixWanted)
    return matriciesWanted
| 2.015625 | 2 |
matrix_ops.py | rkibria/display3dfile | 0 | 12761276 | <gh_stars>0
import math
def matrixMult(v, m):
    """Apply the row-major 4x4 matrix *m* to the 4-vector *v*.

    (Despite the name, this is a matrix-vector product; returns a 4-tuple.)
    """
    return tuple(
        m[4 * row] * v[0]
        + m[4 * row + 1] * v[1]
        + m[4 * row + 2] * v[2]
        + m[4 * row + 3] * v[3]
        for row in range(4)
    )
def getTranslationMatrix(dx, dy, dz):
    """Row-major 4x4 translation matrix moving points by (dx, dy, dz)."""
    tx, ty, tz = float(dx), float(dy), float(dz)
    return [
        1.0, 0.0, 0.0, tx,
        0.0, 1.0, 0.0, ty,
        0.0, 0.0, 1.0, tz,
        0.0, 0.0, 0.0, 1.0,
    ]
def getRotateXMatrix(phi):
    """Row-major 4x4 rotation about the X axis by *phi* radians."""
    c, s = math.cos(phi), math.sin(phi)
    return [
        1.0, 0.0, 0.0, 0.0,
        0.0, c, -s, 0.0,
        0.0, s, c, 0.0,
        0.0, 0.0, 0.0, 1.0,
    ]
def getRotateYMatrix(phi):
    """Row-major 4x4 rotation about the Y axis by *phi* radians."""
    c, s = math.cos(phi), math.sin(phi)
    return [
        c, 0.0, s, 0.0,
        0.0, 1.0, 0.0, 0.0,
        -s, 0.0, c, 0.0,
        0.0, 0.0, 0.0, 1.0,
    ]
def getRotateZMatrix(phi):
    """Row-major 4x4 rotation about the Z axis by *phi* radians."""
    c, s = math.cos(phi), math.sin(phi)
    return [
        c, -s, 0.0, 0.0,
        s, c, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0,
        0.0, 0.0, 0.0, 1.0,
    ]
def getScaleMatrix(sx, sy, sz):
    """Row-major 4x4 scaling matrix with per-axis factors (sx, sy, sz)."""
    fx, fy, fz = float(sx), float(sy), float(sz)
    return [
        fx, 0.0, 0.0, 0.0,
        0.0, fy, 0.0, 0.0,
        0.0, 0.0, fz, 0.0,
        0.0, 0.0, 0.0, 1.0,
    ]
| 2.53125 | 3 |
Turtle/squareSpiral.py | PraghadeshManivannan/Projects | 0 | 12761277 | <reponame>PraghadeshManivannan/Projects
from turtle import *
import random
# Set the window title.
title('Square Spiral')
# Palette cycled through while drawing.
# NOTE(review): this list shadows turtle.color brought in by the star
# import — harmless here since turtle.color is never called, but worth
# renaming if the script grows.
color = ['red','green','blue','cyan']
# Set the screen background color.
bgcolor('black')
# Hide the turtle cursor while drawing.
hideturtle()
# Fastest drawing speed.
speed(0)
# Draw the square spiral: each segment grows by 5 units; turning 92°
# (not 90°) makes the square slowly rotate as it expands.
for i in range(360):
    # Cycle through the four palette colors.
    pencolor(color[i%4])
    # Segment length grows with each iteration.
    forward((5*i)+1)
    # Pen width grows gradually with the spiral.
    width(i/100 + 1)
    # Slightly more than a right angle to produce the spiral twist.
    left(92)
done()
| 4 | 4 |
def menu_tree_to_vue(menus):
    """Convert a menu tree into the nested dict structure vue-router expects."""

    def _node(menu):
        node = {
            'id': menu.id,
            'name': menu.name,
            'path': menu.path,
            'component': menu.component,
            'hidden': menu.is_show is False,
            'meta': {
                'title': menu.title,
                'icon': menu.icon,
                'no_cache': menu.is_cache is False,
                'roles': [role.slug for role in menu.roles.all()],
            },
        }
        # Recurse only when the menu actually carries children.
        if hasattr(menu, 'children'):
            node['children'] = menu_tree_to_vue(menu.children)
        return node

    return [_node(menu) for menu in menus]
class PermInspector(object):
    """
    Role-based permission checks for users carrying `id` and `role_slugs`.
    """

    # Users whose id appears here are unconditionally super admins.
    SUPER_ADMIN_IDS = [1]

    def is_super_admin(self, user):
        """True when *user* is id-whitelisted or holds the 'super_admin' role."""
        if not user:
            return False
        if user.id in self.SUPER_ADMIN_IDS:
            return True
        slugs = user.role_slugs
        return bool(slugs) and 'super_admin' in slugs

    def check_role(self, user, role_slugs):
        """
        Check whether *user* may pass a role gate.

        :param user: the user being checked
        :param role_slugs: required role slugs; an empty/None list means
            the gate is open to everyone
        :return: bool
        """
        if self.is_super_admin(user):
            return True
        if not role_slugs:
            return True
        user_slugs = user.role_slugs
        if not user_slugs:
            return False
        return bool(set(user_slugs) & set(role_slugs))


perm_inspector = PermInspector()
| 2.28125 | 2 |
def cube(x):
    """Return *x* cubed.

    (PEP 8 E731: a named ``def`` instead of an assigned lambda — same
    callable name, so callers are unaffected.)
    """
    return x ** 3


def fibonacci(n):
    """Yield the first *n* Fibonacci numbers: 0, 1, 1, 2, ...

    (The old comment claimed it returns a list; it is a generator.)
    """
    a = 0
    b = 1
    for _ in range(n):
        yield a
        a, b = b, a + b
def main():
    # Read n from stdin, then print the cubes of the first n Fibonacci numbers.
    n = int(input())
    print(list(map(cube, fibonacci(n))))


if __name__ == '__main__':
    main()
| 4.15625 | 4 |
PhythonExercicios/ex035.py | Luis-Otavio-Araujo/Curso-de-Python | 1 | 12761280 | <gh_stars>1-10
# Read three segment lengths and report (in Portuguese) whether they can
# form a triangle: every side must be shorter than the sum of the other two.
r1 = float(input('1° segmento: '))
r2 = float(input('2° segmento: '))
r3 = float(input('3° segmento: '))
print('==' * 20)
forma_triangulo = r1 < r2 + r3 and r2 < r1 + r3 and r3 < r2 + r1
if forma_triangulo:
    print('Os segmentos acima FORMAM um triângulo!')
else:
    print('Os segmentos NÃO FORMAM um triângulo!')
chapter8/demo/code/8-2_apriori_rules.py | hitaitengteng/python | 0 | 12761281 | <reponame>hitaitengteng/python
#-*- coding: utf-8 -*-
# Script: convert a transaction file into a 0-1 matrix and mine
# association rules with a hand-written Apriori implementation.
# NOTE(review): Python 2-era code — time.clock() was removed in Python 3.8,
# DataFrame.as_matrix() in pandas 1.0, and map() returns a lazy iterator on
# Python 3; confirm the target runtime before reusing.
from __future__ import print_function
import pandas as pd
from apriori import * # import the locally written apriori helpers (find_rule)
import time # used for simple wall-clock timing

inputfile = 'C:/Users/att/Desktop/python_practice_of_data_analysis_and_mining-master/chapter8/demo/data/apriori.txt' # transaction-set input file
data = pd.read_csv(inputfile, header=None, dtype=object)

start = time.clock() # start timing
print(u'\n转换原始数据至0-1矩阵...')
# Helper: one transaction row -> Series of 1s indexed by its non-null items.
ct = lambda x: pd.Series(1, index=x[pd.notnull(x)]) # row-to-indicator conversion
b = map(ct, data.as_matrix()) # apply row-wise via map
data = pd.DataFrame(b).fillna(0) # assemble the 0-1 matrix; absent items become 0
end = time.clock() # stop timing
print(u'\n转换完毕,用时:%0.2f秒' % (end - start))

del b # drop the intermediate to free memory
support = 0.06 # minimum support
confidence = 0.75 # minimum confidence
ms = '---' # item separator used in rule output (e.g. A---B); must not occur in the data
start = time.clock() # start timing
print(u'\n开始搜索关联规则...')
find_rule(data, support, confidence, ms)
end = time.clock() # stop timing
print(u'\n搜索完成,用时:%0.2f秒' % (end - start))
| 2.625 | 3 |
src/oci/identity/models/db_credential_summary.py | pabs3/oci-python-sdk | 0 | 12761282 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DbCredentialSummary(object):
"""
As the name suggests, an `DbCredentialSummary` object contains information about an `DbCredential`.
The DB credential is used for DB authentication with
the [DB Service].
"""
def __init__(self, **kwargs):
"""
Initializes a new DbCredentialSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this DbCredentialSummary.
:type id: str
:param user_id:
The value to assign to the user_id property of this DbCredentialSummary.
:type user_id: str
:param description:
The value to assign to the description property of this DbCredentialSummary.
:type description: str
:param time_created:
The value to assign to the time_created property of this DbCredentialSummary.
:type time_created: datetime
:param time_expires:
The value to assign to the time_expires property of this DbCredentialSummary.
:type time_expires: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this DbCredentialSummary.
:type lifecycle_state: str
"""
self.swagger_types = {
'id': 'str',
'user_id': 'str',
'description': 'str',
'time_created': 'datetime',
'time_expires': 'datetime',
'lifecycle_state': 'str'
}
self.attribute_map = {
'id': 'id',
'user_id': 'userId',
'description': 'description',
'time_created': 'timeCreated',
'time_expires': 'timeExpires',
'lifecycle_state': 'lifecycleState'
}
self._id = None
self._user_id = None
self._description = None
self._time_created = None
self._time_expires = None
self._lifecycle_state = None
@property
def id(self):
"""
Gets the id of this DbCredentialSummary.
The OCID of the DB credential.
:return: The id of this DbCredentialSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DbCredentialSummary.
The OCID of the DB credential.
:param id: The id of this DbCredentialSummary.
:type: str
"""
self._id = id
@property
def user_id(self):
"""
Gets the user_id of this DbCredentialSummary.
The OCID of the user the DB credential belongs to.
:return: The user_id of this DbCredentialSummary.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this DbCredentialSummary.
The OCID of the user the DB credential belongs to.
:param user_id: The user_id of this DbCredentialSummary.
:type: str
"""
self._user_id = user_id
@property
def description(self):
"""
Gets the description of this DbCredentialSummary.
The description you assign to the DB credential. Does not have to be unique, and it's changeable.
(For tenancies that support identity domains) You can have an empty description.
:return: The description of this DbCredentialSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this DbCredentialSummary.
The description you assign to the DB credential. Does not have to be unique, and it's changeable.
(For tenancies that support identity domains) You can have an empty description.
:param description: The description of this DbCredentialSummary.
:type: str
"""
self._description = description
@property
def time_created(self):
"""
Gets the time_created of this DbCredentialSummary.
Date and time the `DbCredential` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:return: The time_created of this DbCredentialSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this DbCredentialSummary.
Date and time the `DbCredential` object was created, in the format defined by RFC3339.
Example: `2016-08-25T21:10:29.600Z`
:param time_created: The time_created of this DbCredentialSummary.
:type: datetime
"""
self._time_created = time_created
    @property
    def time_expires(self):
        """
        Gets the time_expires of this DbCredentialSummary.
        Date and time when this credential will expire, in the format defined by RFC3339.
        Null if it never expires.
        Example: `2016-08-25T21:10:29.600Z`
        :return: The time_expires of this DbCredentialSummary.
        :rtype: datetime
        """
        # May be None ("Null if it never expires") - callers must handle that.
        return self._time_expires
    @time_expires.setter
    def time_expires(self, time_expires):
        """
        Sets the time_expires of this DbCredentialSummary.
        Date and time when this credential will expire, in the format defined by RFC3339.
        Null if it never expires.
        Example: `2016-08-25T21:10:29.600Z`
        :param time_expires: The time_expires of this DbCredentialSummary.
        :type: datetime
        """
        self._time_expires = time_expires
    @property
    def lifecycle_state(self):
        """
        Gets the lifecycle_state of this DbCredentialSummary.
        The credential's current state. After creating a DB credential, make sure its `lifecycleState` changes from
        CREATING to ACTIVE before using it.
        :return: The lifecycle_state of this DbCredentialSummary.
        :rtype: str
        """
        return self._lifecycle_state
    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        """
        Sets the lifecycle_state of this DbCredentialSummary.
        The credential's current state. After creating a DB credential, make sure its `lifecycleState` changes from
        CREATING to ACTIVE before using it.
        :param lifecycle_state: The lifecycle_state of this DbCredentialSummary.
        :type: str
        """
        # The value is not validated against the known states here.
        self._lifecycle_state = lifecycle_state
    def __repr__(self):
        # formatted_flat_dict renders all model attributes as a flat dict
        # string; presumably the oci.util helper - defined outside this view.
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality over the full attribute dict. Note: any object with
        # an identical __dict__ compares equal (no isinstance check).
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Explicit __ne__ because Python 2 does not derive it from __eq__.
        return not self == other
| 2.234375 | 2 |
src/entitas/context.py | UpUpLiu/python_entitas | 1 | 12761283 | from collections import deque
from .entity import Entity
from .matcher import Matcher
from .group import Group
from .exceptions import MissingEntity
class Context(object):
    """A context is a data structure managing entities."""

    def __init__(self):
        #: Entities retained by this context.
        self._entities = set()
        #: An object pool to recycle entities.
        self._reusable_entities = deque()
        #: Entities counter.
        self._entity_index = 0
        #: Dictionary of matchers mapping groups.
        self._groups = {}
        #: Dictionary of component types mapping entity indices.
        self._entity_indices = {}
        #: Class instantiated by :meth:`_create_entity` (overridable).
        self.entity_class = Entity

    @property
    def entities(self):
        return self._entities

    def has_entity(self, entity):
        """Checks if the context contains this entity.

        :param entity: Entity
        :rtype: bool
        """
        return entity in self._entities

    def _register(self, entity):
        """Activates `entity`, stores it and wires up component events.

        Shared by :meth:`create_entity` and :meth:`init_entity`.
        """
        entity.activate(self._entity_index)
        self._entity_index += 1
        self._entities.add(entity)
        entity.on_component_added += self._comp_added_or_removed
        entity.on_component_removed += self._comp_added_or_removed
        entity.on_component_replaced += self._comp_replaced
        return entity

    def create_entity(self):
        """Creates an entity. Pops one entity from the pool if it is not
        empty, otherwise creates a new one. Increments the entity index.
        Then adds the entity to the list.
        """
        entity = (self._reusable_entities.pop() if self._reusable_entities
                  else self._create_entity())
        return self._register(entity)

    def init_entity(self, entity):
        """Activates and registers an externally constructed `entity`.

        Same wiring as :meth:`create_entity`, but the caller supplies the
        instance instead of drawing from the pool.
        """
        return self._register(entity)

    def destroy_entity(self, entity):
        """Removes an entity from the list and adds it to the pool. If
        the context does not contain this entity, a
        :class:`MissingEntity` exception is raised.

        :param entity: Entity
        """
        if not self.has_entity(entity):
            raise MissingEntity()
        entity.destroy()
        self._entities.remove(entity)
        self._reusable_entities.append(entity)

    def get_group(self, matcher):
        """User can ask for a group of entities from the context. The
        group is identified through a :class:`Matcher` and cached.

        :param matcher: Matcher
        """
        if matcher in self._groups:
            return self._groups[matcher]
        group = Group(matcher)
        for entity in self._entities:
            group.handle_entity_silently(entity)
        self._groups[matcher] = group
        return group

    def set_entity_class(self, entity_class):
        self.entity_class = entity_class

    def _create_entity(self):
        return self.entity_class()

    def set_unique_component(self, comp_type, *args):
        """Creates a singleton entity holding one `comp_type` component.

        The entity is exposed as ``self.<Name>Entity`` and the component as
        ``self.<Name>`` (where ``<Name>`` is ``comp_type._name``).
        """
        entity = self.create_entity()
        # BUG FIX: forward the caller's arguments; the old code called
        # ``comp_type.new(...)`` with the Ellipsis literal and silently
        # discarded *args. (Assumes `new` accepts positionals - confirm.)
        new_comp = comp_type.new(*args)
        # BUG FIX: use setattr instead of exec(); equivalent writes, but
        # readable and symmetric with the getattr reads below.
        setattr(self, '{0}Entity'.format(comp_type._name), entity)
        setattr(self, comp_type._name, new_comp)
        comp = entity.add_with_component(comp_type, new_comp)
        return comp, entity

    def has_unique_component(self, comp_type):
        # BUG FIX: exec() always returns None, so this method previously
        # returned None for every input; getattr actually evaluates the check.
        name = comp_type._name
        return getattr(self, '{0}Entity'.format(name), None) is not None

    def remove_unique_component(self, name):
        # BUG FIX: exec() cannot return a value, so destroy_entity() was
        # previously handed None; read the entity back with getattr.
        old_entity = getattr(self, '{0}Entity'.format(name))
        setattr(self, name, None)
        setattr(self, '{0}Entity'.format(name), None)
        self.destroy_entity(old_entity)

    def get_unique_component(self, comp_type):
        group = self.get_group(Matcher(comp_type))
        return group.single_entity.get(comp_type)

    def add_entity_index(self, entity_index):
        self._entity_indices[entity_index.type] = entity_index

    def get_entity_index(self, comp_type):
        return self._entity_indices[comp_type]

    def _comp_added_or_removed(self, entity, comp):
        for matcher in self._groups:
            self._groups[matcher].handle_entity(entity, comp)

    def _comp_replaced(self, entity, previous_comp, new_comp):
        for matcher in self._groups:
            group = self._groups[matcher]
            group.update_entity(entity, previous_comp, new_comp)

    def __repr__(self):
        return '<Context ({}/{})>'.format(
            len(self._entities), len(self._reusable_entities))
| 3.015625 | 3 |
caql/utils.py | deepneuralmachine/google-research | 23,901 | 12761284 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import os
import pickle
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow.compat.v1 as tf
from tf_agents.environments import suite_mujoco
from tf_agents.specs import array_spec
# Command-line knobs consumed by periodic_updates() below.
flags.DEFINE_integer('checkpoint_iterations', 50, 'Periodicity of checkpoints.')
flags.DEFINE_integer('eval_iterations', 50, 'Periodicity of evaluations.')
flags.DEFINE_integer('num_evals', 10, 'Number of evaluations.')
FLAGS = flags.FLAGS
# File name used for TF checkpoints inside FLAGS.checkpoint_dir.
_CHECKPOINT_FILENAME = 'model.ckpt'
def get_state_and_action_specs(gym_env, action_bounds=None):
  """Returns state and action specs for a Gym environment.
  Args:
    gym_env: gym.core.Env. A Gym environment.
    action_bounds: list of strings. Min and max values in string for action
      variables.
  Returns:
    (BoundedArraySpec, BoundedArraySpec). The first is a state spec and the
    second is a action spec.
  Raises:
    NotImplementedError: if the observation or action space is not a Box.
  """
  if isinstance(gym_env.observation_space, gym.spaces.Box):
    state_spec = array_spec.BoundedArraySpec(
        shape=gym_env.observation_space.shape,
        dtype=gym_env.observation_space.dtype,
        minimum=gym_env.observation_space.low,
        maximum=gym_env.observation_space.high)
  else:
    raise NotImplementedError(type(gym_env.observation_space))
  if action_bounds:
    # Explicit bounds (strings, e.g. from flags) override the env's native
    # per-dimension bounds with a single scalar tiled to the action shape.
    assert len(action_bounds) == 2
    action_min = np.tile(float(action_bounds[0]), gym_env.action_space.shape)
    action_max = np.tile(float(action_bounds[1]), gym_env.action_space.shape)
  else:
    action_min = gym_env.action_space.low
    action_max = gym_env.action_space.high
  if isinstance(gym_env.action_space, gym.spaces.Box):
    action_spec = array_spec.BoundedArraySpec(
        shape=gym_env.action_space.shape,
        dtype=gym_env.action_space.dtype,
        minimum=action_min,
        maximum=action_max)
  else:
    raise NotImplementedError(type(gym_env.action_space))
  return state_spec, action_spec
def create_env(env_name):
  """Creates Environment.

  Pendulum is loaded through plain gym; the MuJoCo tasks go through the
  tf_agents suite_mujoco wrapper.
  """
  loaders = {
      'Pendulum': lambda: gym.make('Pendulum-v0'),
      'Hopper': lambda: suite_mujoco.load('Hopper-v2'),
      'Walker2D': lambda: suite_mujoco.load('Walker2d-v2'),
      'HalfCheetah': lambda: suite_mujoco.load('HalfCheetah-v2'),
      'Ant': lambda: suite_mujoco.load('Ant-v2'),
      'Humanoid': lambda: suite_mujoco.load('Humanoid-v2'),
  }
  if env_name not in loaders:
    raise ValueError('Unsupported environment: %s' % env_name)
  return loaders[env_name]()
def _env_reset(env):
if hasattr(env, 'time_step_spec'):
return env.reset().observation
else:
return env.reset()
def _env_step(env, action):
if hasattr(env, 'time_step_spec'):
ts = env.step(action)
return ts.observation, ts.reward, env.done, env.get_info()
else:
return env.step(action)
def warm_up_replay_memory(session, behavior_policy, time_out, discount_factor,
                          replay_memory):
  """Fills `replay_memory` until one training iteration's worth of data fits.

  Repeatedly rolls out `behavior_policy` until the memory holds at least
  FLAGS.batch_size * FLAGS.train_steps_per_iteration transitions.
  """
  # The number of events in an episode could be less than the maximum episode
  # length (i.e., time_out) when the environment has a termination state.
  min_replay_memory_size = FLAGS.batch_size * FLAGS.train_steps_per_iteration
  while replay_memory.size < min_replay_memory_size:
    num_events = min_replay_memory_size - replay_memory.size
    # Over-provision by one episode since episodes may terminate early.
    num_episodes = int(num_events / time_out) + 1
    collect_experience_parallel(num_episodes, session, behavior_policy,
                                time_out, discount_factor, replay_memory)
def collect_experience_parallel(num_episodes,
                                session,
                                behavior_policy,
                                time_out,
                                discount_factor,
                                replay_memory,
                                collect_init_state_step=False):
  """Executes threads for data collection.

  Runs `collect_experience` `num_episodes` times, using a thread pool of
  FLAGS.collect_experience_parallelism workers when more than one episode
  is requested.

  Returns:
    int. Number of transitions added to `replay_memory` by this call.
  """
  old_size = replay_memory.size
  if num_episodes > 1:
    with futures.ThreadPoolExecutor(
        max_workers=FLAGS.collect_experience_parallelism) as executor:
      for _ in range(num_episodes):
        executor.submit(collect_experience, session, behavior_policy, time_out,
                        discount_factor, replay_memory, collect_init_state_step)
  else:
    collect_experience(session, behavior_policy, time_out, discount_factor,
                       replay_memory, collect_init_state_step)
  return replay_memory.size - old_size
def collect_experience(session,
                       behavior_policy,
                       time_out,
                       discount_factor,
                       replay_memory,
                       collect_init_state_step=False):
  """Adds experiences into replay memory.
  Generates an episode, computes Q targets for state and action pairs in the
  episode, and adds them into the replay memory.
  """
  with session.as_default():
    with session.graph.as_default():
      # A fresh environment per call, so concurrent threads never share one.
      env = create_env(FLAGS.env_name)
      episode, _, _ = _collect_episode(env, time_out, discount_factor,
                                       behavior_policy, collect_init_state_step)
      # replay_memory.extend is assumed thread-safe - TODO confirm.
      replay_memory.extend(episode)
      if hasattr(env, 'close'):
        env.close()
def _collect_episode(env, time_out, discount_factor, behavior_policy,
                     collect_init_state_step=False):
  """Collects episodes of trajectories by following a behavior policy.

  Args:
    env: environment to roll out (gym env or tf_agents wrapper).
    time_out: int. Maximum number of steps in the episode.
    discount_factor: float. Discount applied to rewards when < 1.
    behavior_policy: policy whose `action(state, ...)` selects actions; a
      None action aborts the episode.
    collect_init_state_step: bool. If True, each event additionally records
      the episode's initial state and the step index.

  Returns:
    (episode, episode_lengths, episode_rewards) tuple; the two lists are
    empty when no step was taken.
  """
  episode = []
  episode_lengths = []
  episode_rewards = []
  # BUG FIX: reset exactly once. The old code reset twice, which left
  # `state` stale relative to the environment's actual post-reset state
  # (the first action was chosen from the discarded first reset).
  state = _env_reset(env)
  init_state = state
  done = False
  episode_step_count = 0
  e_reward = 0
  for _ in range(time_out):
    # First, sample an action
    action = behavior_policy.action(state, use_action_function=True)
    if action is None:
      break
    next_state, reward, done, info = _env_step(env, action)
    # Zero out the terminal reward, matching the training target convention.
    reward = reward if not done else 0.0
    # Save the experience to our buffer
    if collect_init_state_step:
      episode.append([
          init_state, state, action, reward, next_state, episode_step_count,
          done, info
      ])
    else:
      episode.append([state, action, reward, next_state, done, info])
    # update state, e_reward and step count
    state = next_state
    if discount_factor < 1:
      e_reward += (discount_factor**episode_step_count) * reward
    else:
      e_reward += reward
    episode_step_count += 1
    if done:
      break
  if episode_step_count > 0:
    episode_lengths.append(episode_step_count)
    episode_rewards.append(e_reward)
  return (episode, episode_lengths, episode_rewards)
def periodic_updates(iteration,
                     train_step,
                     replay_memories,
                     greedy_policy,
                     saver,
                     sess,
                     time_out,
                     use_action_function=True,
                     tf_summary=None):
  """Periodically writes checkpoints and evaluates the greedy policy.

  Every FLAGS.checkpoint_iterations iterations the TF session and replay
  memories are checkpointed; every FLAGS.eval_iterations iterations
  FLAGS.num_evals evaluation episodes are run in parallel and their averages
  are logged (and appended to `tf_summary` when given).
  """
  if (FLAGS.checkpoint_dir and FLAGS.checkpoint_iterations and
      iteration % FLAGS.checkpoint_iterations == 0):
    logging.info('Iteration: %d, writing checkpoints..', iteration)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
      tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
    checkpoint_file = os.path.join(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)
    saver.save(
        sess, checkpoint_file, global_step=train_step, write_meta_graph=False)
    for replay_memory in replay_memories:
      replay_memory.save(FLAGS.checkpoint_dir, delete_old=True)
    logging.info('Iteration: %d, completed writing checkpoints.', iteration)
  if FLAGS.eval_iterations and iteration % FLAGS.eval_iterations == 0:
    logging.info('Iteration: %d, evaluating the model..', iteration)
    scores = []
    action_magnitudes = []
    episode_lens = []
    future_list = []
    # One thread per evaluation episode; each creates its own environment.
    with futures.ThreadPoolExecutor(max_workers=FLAGS.num_evals) as executor:
      for _ in range(FLAGS.num_evals):
        future_list.append(
            executor.submit(
                _evaluate_model,
                time_out,
                greedy_policy,
                use_action_function=use_action_function,
                render=False))
      for future in futures.as_completed(future_list):
        score, action_magnitude, episode_len = future.result()
        scores.append(score)
        action_magnitudes.append(action_magnitude)
        episode_lens.append(episode_len)
    avg_score = np.mean(scores)
    avg_action_magitude = np.mean(action_magnitudes)
    avg_episode_len = np.mean(episode_lens)
    logging.info(
        'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, '
        'avg_action_magnitude: %.3f', iteration, avg_score, avg_episode_len,
        avg_action_magitude)
    if tf_summary:
      tf_summary.value.extend([
          tf.Summary.Value(tag='avg_score', simple_value=avg_score),
          tf.Summary.Value(
              tag='avg_action_magnitude', simple_value=avg_action_magitude),
          tf.Summary.Value(tag='avg_episode_len', simple_value=avg_episode_len)
      ])
def _evaluate_model(time_out,
                    greedy_policy,
                    use_action_function=False,
                    render=False):
  """Runs one greedy evaluation episode.

  Args:
    time_out: int. Maximum number of steps in the episode.
    greedy_policy: policy used for evaluation; a None action ends the episode.
    use_action_function: bool. Forwarded to the policy's `action` call.
    render: bool. If True, renders the environment every step.

  Returns:
    (total_reward, avg_action_magnitude, episode_len) tuple.
  """
  env = create_env(FLAGS.env_name)
  state = _env_reset(env)
  total_reward = 0.0
  total_action_magnitude = 0.0
  episode_len = 0
  for _ in range(time_out):
    if render:
      env.render()
    action = greedy_policy.action(
        np.reshape(state, [1, -1]), use_action_function)
    if action is None:
      break
    next_state, reward, done, _ = _env_step(env, action)
    state = next_state
    total_reward += reward
    if greedy_policy.continuous_action:
      total_action_magnitude += np.linalg.norm(action, np.inf)
    episode_len += 1
    if done:
      break
  # BUG FIX: guard the average when the policy aborts before taking any
  # step (episode_len == 0 previously raised ZeroDivisionError).
  avg_action_magnitude = (
      total_action_magnitude / episode_len if episode_len else 0.0)
  return total_reward, avg_action_magnitude, episode_len
def save_hparam_config(dict_to_save, config_dir):
  """Saves config file of hparam.

  Args:
    dict_to_save: dict. Hyperparameter configuration to persist.
    config_dir: str. Directory where 'hparam.pickle' is written (created if
      missing).
  """
  filename = os.path.join(config_dir, 'hparam.pickle')
  print('Saving results to %s' % filename)
  if not tf.gfile.Exists(config_dir):
    tf.gfile.MakeDirs(config_dir)
  # BUG FIX: pickle streams are binary - open with 'wb'; writing protocol-2
  # bytes through a text-mode ('w') handle fails under Python 3.
  with tf.gfile.GFile(filename, 'wb') as f:
    pickle.dump(dict_to_save, f, protocol=2)
def action_projection(action, action_spec, softmax=False):
  """Projects action tensor onto a bound.

  Args:
    action: np.ndarray or TF tensor of shape [batch, action_dim].
    action_spec: spec object with `minimum`/`maximum` attributes; ignored
      when `softmax` is True.
    softmax: bool. If True, maps each row onto the probability simplex with
      a numerically stable softmax instead of clipping to the spec bounds.

  Returns:
    Projected actions, same type and shape as `action`.
  """
  if isinstance(action, np.ndarray):
    if softmax:
      # BUG FIX: keepdims=True so the per-row max/sum (shape [batch, 1])
      # broadcast along rows. Without it the [batch]-shaped vectors
      # broadcast against the LAST axis, normalizing across the wrong
      # dimension (and raising for non-square batches).
      e_x = np.exp(action - np.max(action, axis=1, keepdims=True))
      return e_x / np.sum(e_x, axis=1, keepdims=True)
    else:
      return np.minimum(action_spec.maximum,
                        np.maximum(action_spec.minimum, action))
  else:
    # TF version
    if softmax:
      return tf.nn.softmax(action, axis=1)
    else:
      return tf.minimum(action_spec.maximum,
                        tf.maximum(action_spec.minimum, action))
def create_placeholders_for_q_net(tf_vars):
  """Creates placeholders for feeding values to TF variables.

  Args:
    tf_vars: list. A list of TF variables. These are variables for a neural
      network approximating a Q function.

  Returns:
    dict. Maps '<variable name>_ph' to a tf.placeholder of matching
    dtype/shape.
  """
  return {
      '{}_ph'.format(var.name): tf.placeholder(dtype=var.dtype,
                                               shape=var.shape)
      for var in tf_vars
  }
def build_dummy_q_net(state, action, ph_dict, q_net_vars):
  """Builds a dummy Q network.
  This function builds a neural network where parameters are given by
  placeholders.
  Args:
    state: TF Tensor. State tensor.
    action: TF Tensor. Action tensor.
    ph_dict: dict. A dictionary mapping a TF variable's name to a
      tf.placeholder. There is one placeholder for each variable in
      `q_net_vars`.
    q_net_vars: list. A list of TF variables. The list should have even number
      of variables. One for weights and other for bias for each layer of a
      neural network.
  Returns:
    TF Tensor. Output tensor of a Q network.
  """
  assert bool(q_net_vars) and len(q_net_vars) % 2 == 0
  net = tf.concat([state, action], axis=1)
  # Specific for MLP: variables are assumed ordered [W0, b0, W1, b1, ...].
  for itr, var in enumerate(q_net_vars):
    if itr % 2 == 0:
      # even itr, multiplicative weights (matmul via einsum)
      net = tf.einsum('ij,jk->ik', net, ph_dict['{}_ph'.format(var.name)])
    else:
      # odd itr, additive weights
      net = tf.nn.bias_add(net, ph_dict['{}_ph'.format(var.name)])
      # Output layer doesn't have an activation function.
      if itr < len(q_net_vars) - 1:
        net = tf.nn.relu(net)
  return net
def make_tf_summary_histogram(values, num_bins=10):
  """Constructs a tf Summary of type histogram from a np array of values.
  Args:
    values: list or np.array.
    num_bins: int. Number of histogram bins.
  Returns:
    tf.HistogramProto.
  """
  values = np.reshape(values, [-1])
  counts, limits = np.histogram(values, bins=num_bins)
  return tf.HistogramProto(
      min=np.amin(values),
      max=np.amax(values),
      num=values.size,
      sum=np.sum(values),
      sum_squares=np.sum(values**2),
      # np.histogram returns num_bins + 1 edges; HistogramProto wants the
      # UPPER limit of each bucket, hence dropping the first edge.
      bucket_limit=limits.tolist()[1:],
      bucket=counts.tolist())
| 1.96875 | 2 |
src/mmwall.py | esabouraud/mmwall | 0 | 12761285 | #!/bin/python
# mmwall launcher
# mmwall currently only works on Windows and CentOS, for wallpaper local and remote setting:
# Win32 API, network drive mount and psexec for Windows >= XP.
# sftp, ssh and Gnome configuration tool for CentOS >= 5.
import json
import optparse
import set_wallpaper
import set_wallpaper_logon
import set_wallpaper_remote
import synergy_wallpaper
import randomdownload_wallpaper
VERSION_PROGRAM = '0.1.0'
def run_mmwall(cfgfile):
    """Load the JSON config, build per-screen wallpapers and apply them.

    For each configured host the wallpaper (and logon-screen variant) is set
    either remotely (Windows via psexec, Linux via ssh/Gnome) or locally.

    :param cfgfile: path to the mmwall JSON configuration file.
    """
    # FIX: use a context manager so the config file handle is closed
    # (the old json.load(open(...)) leaked the descriptor).
    with open(cfgfile) as fp:
        cfg = json.load(fp)
    screenratio = cfg['general']['screenratio']
    imgsrc = cfg['general'].get('imgsrc')
    # One (width, height, voffset, host_id) tuple per physical screen.
    screenconf = []
    for host in cfg['hosts']:
        idx = host['id']
        for screen in host['screens']:
            screenconf.append((screen['screenwidth'], screen['screenheight'], screen['screenvoffset'], idx))
    randomdownload_wallpaper.get_wallpaper(True, screenratio, imgsrc)
    synergy_wallpaper.make_wallpapers(True, screenconf)
    for host in cfg['hosts']:
        idx = host['id']
        if host.get('logonscreenwidth') and host.get('logonscreenheight'):
            logonscreensize = (host['logonscreenwidth'], host['logonscreenheight'])
        else:
            # Fall back to the first screen's dimensions.
            logonscreensize = (host['screens'][0]['screenwidth'], host['screens'][0]['screenheight'])
        if host.get('remotehost'):
            remotelogin = host.get('login')
            remotepw = host.get('password')
            remoteos = host.get('remoteos')
            # Missing 'remoteos' defaults to Windows.
            if remoteos is None or remoteos == 'Windows':
                set_wallpaper_remote.setremotewallwin(host['remotehost'], remotelogin, remotepw, idx, logonscreensize)
            elif remoteos == 'Linux':
                set_wallpaper_remote.setremotewallgnome(host['remotehost'], remotelogin, remotepw, idx, logonscreensize)
            else:
                # FIX: parenthesized print works on both Python 2 and 3.
                print('Remote host os "%s" unsupported.' % remoteos)
        else:
            set_wallpaper.set_wallpaper('current', idx)
            set_wallpaper_logon.set_wallpaper_logon('current', idx, logonscreensize)
if __name__=='__main__':
    # CLI entry point: parse -c/--configuration and run with that config file.
    parser = optparse.OptionParser(description='mmwall: multi-machine background wallpaper changer', version="%prog " + VERSION_PROGRAM)
    parser.add_option('-c', '--configuration', dest='cfgfile', default='mmwallcfg.json', metavar='FILEPATH', help='mmwall configuration file path')
    (options, args) = parser.parse_args()
    run_mmwall(options.cfgfile)
| 2.125 | 2 |
libs/configs/VOC2007/cfgs_res50_voc07_v2.py | DetectionTeamUCAS/RetinaNet_Tensorflow | 55 | 12761286 | <reponame>DetectionTeamUCAS/RetinaNet_Tensorflow
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import os
import tensorflow as tf
import math
"""
cls : aeroplane|| Recall: 0.9438596491228071 || Precison: 0.03152836380684482|| AP: 0.7773597235133686
cls : person|| Recall: 0.9754858657243817 || Precison: 0.021128815456515397|| AP: 0.8110363404804426
cls : sofa|| Recall: 0.9916317991631799 || Precison: 0.003753563509661071|| AP: 0.6926241276381035
cls : car|| Recall: 0.980849292256453 || Precison: 0.016935994019207545|| AP: 0.8612891545940344
cls : motorbike|| Recall: 0.9876923076923076 || Precison: 0.008316708552478172|| AP: 0.7979344387167545
cls : sheep|| Recall: 0.9752066115702479 || Precison: 0.01267318225754484|| AP: 0.6992188982186315
cls : horse|| Recall: 0.9798850574712644 || Precison: 0.010116893134753457|| AP: 0.8387371790700674
cls : train|| Recall: 0.9326241134751773 || Precison: 0.01885980638221585|| AP: 0.7910990164518434
cls : pottedplant|| Recall: 0.9020833333333333 || Precison: 0.0065086355915643275|| AP: 0.4667916780665033
cls : bus|| Recall: 0.9906103286384976 || Precison: 0.005389252145484267|| AP: 0.8013951733234702
cls : diningtable|| Recall: 0.9757281553398058 || Precison: 0.001669157947184853|| AP: 0.6493900213188186
cls : tvmonitor|| Recall: 0.9642857142857143 || Precison: 0.00617386604581549|| AP: 0.7361445157222517
cls : cat|| Recall: 0.9776536312849162 || Precison: 0.016213461805716402|| AP: 0.8708244700458685
cls : bottle|| Recall: 0.8976545842217484 || Precison: 0.004023394942563887|| AP: 0.5570627230945586
cls : cow|| Recall: 0.9877049180327869 || Precison: 0.009388025398309376|| AP: 0.7709867256180059
cls : bird|| Recall: 0.954248366013072 || Precison: 0.01937024588713957|| AP: 0.7632915804610957
cls : boat|| Recall: 0.9163498098859315 || Precison: 0.005341193679218102|| AP: 0.5818730701325913
cls : dog|| Recall: 0.9938650306748467 || Precison: 0.020776333789329686|| AP: 0.8264152853325744
cls : chair|| Recall: 0.9563492063492064 || Precison: 0.005576079160271786|| AP: 0.5278822308428679
cls : bicycle|| Recall: 0.9762611275964391 || Precison: 0.007213646728644098|| AP: 0.811791972130752
mAP is : 0.7316574162386302
"""
# ------------------------------------------------
VERSION = 'RetinaNet_20190522'
NET_NAME = 'resnet_v1_50' # 'MobilenetV2'
ADD_BOX_IN_TENSORBOARD = True
# ---------------------------------------- System_config
ROOT_PATH = os.path.abspath('../')
print(20*"++--")
print(ROOT_PATH)
GPU_GROUP = "0,1,2,3,4,5,6,7"
NUM_GPU = len(GPU_GROUP.strip().split(','))
SHOW_TRAIN_INFO_INTE = 10
SMRY_ITER = 100
SAVE_WEIGHTS_INTE = 5000 * 2
SUMMARY_PATH = ROOT_PATH + '/output/summary'
TEST_SAVE_PATH = ROOT_PATH + '/tools/test_result'
# Map the backbone name to the pretrained-checkpoint file name.
if NET_NAME.startswith("resnet"):
    weights_name = NET_NAME
elif NET_NAME.startswith("MobilenetV2"):
    weights_name = "mobilenet/mobilenet_v2_1.0_224"
else:
    raise Exception('net name must in [resnet_v1_101, resnet_v1_50, MobilenetV2]')
PRETRAINED_CKPT = ROOT_PATH + '/data/pretrained_weights/' + weights_name + '.ckpt'
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
EVALUATE_DIR = ROOT_PATH + '/output/evaluate_result_pickle/'
# ------------------------------------------ Train config
RESTORE_FROM_RPN = False
FIXED_BLOCKS = 1 # allow 0~3
FREEZE_BLOCKS = [True, False, False, False, False] # for gluoncv backbone
USE_07_METRIC = True
MUTILPY_BIAS_GRADIENT = None # 2.0 # if None, will not multipy
GRADIENT_CLIPPING_BY_NORM = None # 10.0 if None, will not clip
BATCH_SIZE = 1
EPSILON = 1e-5
MOMENTUM = 0.9
# Base LR is scaled linearly with the number of GPUs and the batch size.
LR = 5e-4 * NUM_GPU * BATCH_SIZE
DECAY_STEP = [SAVE_WEIGHTS_INTE*12, SAVE_WEIGHTS_INTE*16, SAVE_WEIGHTS_INTE*20]
MAX_ITERATION = SAVE_WEIGHTS_INTE*20
WARM_SETP = int(1.0 / 8.0 * SAVE_WEIGHTS_INTE)
# -------------------------------------------- Data_preprocess_config
DATASET_NAME = 'pascal' # 'pascal', 'coco'
PIXEL_MEAN = [123.68, 116.779, 103.939] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
PIXEL_MEAN_ = [0.485, 0.456, 0.406]
PIXEL_STD = [0.229, 0.224, 0.225] # R, G, B. In tf, channel is RGB. In openCV, channel is BGR
IMG_SHORT_SIDE_LEN = 600
IMG_MAX_LENGTH = 1000
CLASS_NUM = 20
# --------------------------------------------- Network_config
SUBNETS_WEIGHTS_INITIALIZER = tf.random_normal_initializer(mean=0.0, stddev=0.01, seed=None)
SUBNETS_BIAS_INITIALIZER = tf.constant_initializer(value=0.0)
PROBABILITY = 0.01
# Focal-loss style prior: bias the final conv so sigmoid(output) ~ PROBABILITY.
FINAL_CONV_BIAS_INITIALIZER = tf.constant_initializer(value=-math.log((1.0 - PROBABILITY) / PROBABILITY))
WEIGHT_DECAY = 1e-4
# ---------------------------------------------Anchor config
LEVEL = ['P3', 'P4', 'P5', 'P6', 'P7']
BASE_ANCHOR_SIZE_LIST = [32, 64, 128, 256, 512]
ANCHOR_STRIDE = [8, 16, 32, 64, 128]
ANCHOR_SCALES = [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
ANCHOR_RATIOS = [0.5, 1.0, 2.0]
ANCHOR_SCALE_FACTORS = None
USE_CENTER_OFFSET = True
# --------------------------------------------RPN config
SHARE_NET = True
USE_P5 = True
IOU_POSITIVE_THRESHOLD = 0.5
IOU_NEGATIVE_THRESHOLD = 0.4
NMS = True
NMS_IOU_THRESHOLD = 0.5
MAXIMUM_DETECTIONS = 100
FILTERED_SCORE = 0.01
VIS_SCORE = 0.5
| 1.828125 | 2 |
src/api/tests/tests_timetable.py | memclutter/clinic-crm | 3 | 12761287 | import datetime
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from clinic.models import Doctor, Speciality
from timetables.models import Timetable
class TimetableTestCase(APITestCase):
    """API tests for the timetable list endpoint (`api:timetable-list`)."""

    #: strftime format the serializer uses for time fields.
    TIME_FORMAT = '%H:%M:%S'

    def _make_timetable(self, doctor, day_of_week=Timetable.DW_MON,
                        start_hour=8, end_hour=17):
        """Creates a Timetable entry with the standard 12:00-13:00 break."""
        return Timetable.objects.create(
            doctor=doctor,
            day_of_week=day_of_week,
            start_time=datetime.time(hour=start_hour),
            end_time=datetime.time(hour=end_hour),
            break_start_time=datetime.time(hour=12),
            break_end_time=datetime.time(hour=13))

    def test_return_empty_list(self):
        """With no data the endpoint returns 200 and an empty list."""
        response = self._get_response()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, [])

    def test_return_not_empty_list(self):
        """All stored timetables are returned."""
        spec = Speciality.objects.create(title='Test')
        doc = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Test')
        self._make_timetable(doc, Timetable.DW_MON, start_hour=8, end_hour=17)
        self._make_timetable(doc, Timetable.DW_TUE, start_hour=9, end_hour=18)
        response = self._get_response()
        self.assertEqual(len(response.data), Timetable.objects.count())

    # FIX: method name typo ("correcct" -> "correct"); discovery by the
    # test runner is unaffected since the `test_` prefix is kept.
    def test_return_correct_list(self):
        """Serialized fields match the stored timetable entry."""
        spec = Speciality.objects.create(title='Test')
        doc = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Test')
        tt = self._make_timetable(doc)
        response = self._get_response()
        tf = self.TIME_FORMAT
        data = response.data[0]
        self.assertEqual(data['doctor'], doc.id)
        self.assertEqual(data['day_of_week'], tt.day_of_week)
        self.assertEqual(data['start_time'], tt.start_time.strftime(tf))
        self.assertEqual(data['end_time'], tt.end_time.strftime(tf))
        self.assertEqual(data['break_start_time'], tt.break_start_time.strftime(tf))
        self.assertEqual(data['break_end_time'], tt.break_end_time.strftime(tf))

    def test_filter_by_doctor(self):
        """Filtering by doctor only returns that doctor's timetables."""
        spec = Speciality.objects.create(title='Test')
        doc1 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='One')
        doc2 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Two')
        self._make_timetable(doc1, Timetable.DW_MON, start_hour=8, end_hour=17)
        self._make_timetable(doc1, Timetable.DW_TUE, start_hour=9, end_hour=18)
        self._make_timetable(doc2, Timetable.DW_MON, start_hour=8, end_hour=17)
        response = self._get_response(doctor=doc1.id)
        self.assertEqual(len(response.data), doc1.timetable_set.count())

    def test_filter_by_day_of_week(self):
        """Filtering by day of week only returns entries for that day."""
        spec = Speciality.objects.create(title='Test')
        doc1 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='One')
        doc2 = Doctor.objects.create(speciality=spec, first_name='Doctor', last_name='Two')
        self._make_timetable(doc1, Timetable.DW_MON, start_hour=8, end_hour=17)
        self._make_timetable(doc1, Timetable.DW_TUE, start_hour=9, end_hour=18)
        self._make_timetable(doc2, Timetable.DW_MON, start_hour=8, end_hour=17)
        response = self._get_response(day_of_week=Timetable.DW_MON)
        self.assertEqual(
            len(response.data),
            Timetable.objects.filter(day_of_week=Timetable.DW_MON).count())

    def _get_response(self, **kwargs):
        """GETs the timetable list endpoint with *kwargs* as query params."""
        url = reverse('api:timetable-list')
        return self.client.get(url, data=kwargs, format='json')
| 2.3125 | 2 |
jogos/migrations/0001_initial.py | rafacasa/zebra-manager | 0 | 12761288 | # Generated by Django 3.2.5 on 2021-07-23 19:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import localflavor.br.models
# NOTE: auto-generated by Django's `makemigrations` (Django 3.2.5). Do not
# hand-edit the field definitions; change the models and regenerate instead.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('arbitragem', '0001_initial'),
    ]
    operations = [
        # Competition: has an active flag and a unique slug.
        migrations.CreateModel(
            name='Competicao',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=200, verbose_name='Nome da Competição')),
                ('data_inicio', models.DateField(verbose_name='Início da Competição')),
                ('data_final', models.DateField(verbose_name='Final da Competição')),
                ('esta_ativa', models.BooleanField(default=True, verbose_name='Está Ativa?')),
                ('slug_competicao', models.SlugField(blank=True, unique=True)),
            ],
            options={
                'verbose_name': 'Competição',
                'verbose_name_plural': 'Competições',
            },
        ),
        # Stadium: Brazilian address fields via django-localflavor.
        migrations.CreateModel(
            name='Estadio',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome_estadio', models.CharField(max_length=200, verbose_name='Nome do Estádio')),
                ('endereco_estadio', models.CharField(blank=True, max_length=200, verbose_name='Endereco do Estadio')),
                ('cidade_estadio', models.CharField(blank=True, max_length=200, verbose_name='Cidade')),
                ('estado_estadio', localflavor.br.models.BRStateField(blank=True, max_length=2, verbose_name='Estado')),
                ('cep_estadio', localflavor.br.models.BRPostalCodeField(blank=True, max_length=9, verbose_name='CEP')),
            ],
            options={
                'verbose_name': 'Estádio',
                'verbose_name_plural': 'Estádios',
            },
        ),
        # Team: unique name plus active flag and slug.
        migrations.CreateModel(
            name='Time',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=200, unique=True, verbose_name='Nome do Time')),
                ('esta_ativo', models.BooleanField(default=True, verbose_name='Está Ativo?')),
                ('slug_time', models.SlugField(blank=True, unique=True)),
            ],
            options={
                'verbose_name': 'Time',
                'verbose_name_plural': 'Times',
            },
        ),
        # Match: FKs restricted to active teams/competitions; referee escala
        # goes through the arbitragem.Escala m2m table.
        migrations.CreateModel(
            name='Partida',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data_hora', models.DateTimeField(verbose_name='Horário da partida')),
                ('placar_mandante', models.IntegerField(default=0, verbose_name='Placar do Time Mandante')),
                ('placar_visitante', models.IntegerField(default=0, verbose_name='Placar do Time Visitante')),
                ('link_video', models.URLField(blank=True, verbose_name='Vídeo da Partida')),
                ('teve_periodo_extra', models.BooleanField(default=False, verbose_name='Houve Período Extra?')),
                ('competicao', models.ForeignKey(limit_choices_to={'esta_ativa': True}, on_delete=django.db.models.deletion.PROTECT, to='jogos.competicao', verbose_name='Competição')),
                ('escala_arbitragem', models.ManyToManyField(through='arbitragem.Escala', to=settings.AUTH_USER_MODEL, verbose_name='Escala de Arbitragem')),
                ('estadio', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='jogos.estadio', verbose_name='Estádio')),
                ('mandante', models.ForeignKey(limit_choices_to={'esta_ativo': True}, on_delete=django.db.models.deletion.PROTECT, related_name='mandante', to='jogos.time', verbose_name='Time Mandante')),
                ('visitante', models.ForeignKey(limit_choices_to={'esta_ativo': True}, on_delete=django.db.models.deletion.PROTECT, related_name='visitante', to='jogos.time', verbose_name='Time Visitante')),
            ],
            options={
                'verbose_name': 'Partida',
                'verbose_name_plural': 'Partidas',
            },
        ),
    ]
| 1.835938 | 2 |
securitybot/utils/initSecrets.py | gyrospectre/securitybot | 3 | 12761289 | from securitybot import loader
PATH_ROOT = 'securitybot'


def main():
    """Interactively collect every configured SecurityBot secret and store it.

    Reads ``config/bot.yaml``, builds the configured secrets-management
    client, then for each client type prompts the operator for the values of
    the chosen provider's secrets and persists them under
    ``PATH_ROOT/<client_type>/<provider>``.
    """
    config = loader.load_yaml('config/bot.yaml')
    secrets_provider = config['secretsmgmt']['provider']
    secretsclient = loader.build_secrets_client(
        secrets_provider=secrets_provider,
        connection_config=config['secretsmgmt'][secrets_provider]
    )
    secret_spec = config['secretsmgmt']['secrets']
    for client_type, clients in secret_spec.items():
        chosen = config[client_type]['provider']
        print('Chosen {} provider is {}.'.format(client_type, chosen))
        if chosen not in clients:
            print("No secrets found to store for client {}".format(chosen))
            continue
        # Gather every secret value for this provider before storing them
        # together as a single secret object.
        fullsecret = {}
        for secret in clients[chosen]:
            fullsecret[secret] = input(
                "Enter value to store for {} secret {}: ".format(
                    chosen,
                    secret
                )
            )
        secretsclient.create_secret(
            name='{}/{}/{}'.format(PATH_ROOT, client_type, chosen),
            value=fullsecret,
            description='SecurityBot secrets for {} provider {}.'.format(
                client_type, chosen
            )
        )
    print('Finished.')


if __name__ == '__main__':
    main()
| 2.59375 | 3 |
hpl2netCDF_client/config/__init__.py | achho/dl_toolbox | 5 | 12761290 | <gh_stars>1-10
import hpl2netCDF_client.hpl2netCDF_client
from hpl2netCDF_client.config import config | 0.988281 | 1 |
analysis/tests/test_analysis.py | snspam/sn_spam | 0 | 12761291 | """
Tests the analysis module.
"""
import unittest
import pandas as pd
import mock
from .context import analysis
from .context import evaluation
from .context import interpretability
from .context import label
from .context import purity
from .context import util
from .context import test_utils as tu
class AnalysisTestCase(unittest.TestCase):
    """Unit tests for the ``analysis.Analysis`` orchestrator.

    Every collaborator except ``util.Util`` is replaced by a ``mock.Mock``,
    so these tests only verify delegation and argument passing, not the
    collaborators' behavior.
    """

    def setUp(self):
        # Build an Analysis object wired with mocked collaborators.
        config_obj = tu.sample_config()
        mock_label_obj = mock.Mock(label.Label)
        mock_purity_obj = mock.Mock(purity.Purity)
        mock_evaluation_obj = mock.Mock(evaluation.Evaluation)
        mock_interpretability_obj = mock.Mock(
            interpretability.Interpretability)
        util_obj = util.Util()
        self.test_obj = analysis.Analysis(config_obj, mock_label_obj,
                                          mock_purity_obj, mock_evaluation_obj,
                                          mock_interpretability_obj, util_obj)

    def tearDown(self):
        self.test_obj = None

    def test_init(self):
        # setup
        test_obj = self.test_obj
        # assert: constructor stored each collaborator on the right attribute.
        self.assertTrue(isinstance(test_obj.purity_obj,
                                   purity.Purity))
        self.assertTrue(isinstance(test_obj.evaluation_obj,
                                   evaluation.Evaluation))
        self.assertTrue(isinstance(test_obj.interpretability_obj,
                                   interpretability.Interpretability))

    def test_relabel(self):
        # relabel() should delegate straight to the label collaborator.
        self.test_obj.label_obj.relabel = mock.Mock()
        self.test_obj.relabel()
        self.test_obj.label_obj.relabel.assert_called()

    def test_purity(self):
        # test_purity() forwards the dataframe to purity.test_relations.
        self.test_obj.purity_obj.test_relations = mock.Mock()
        self.test_obj.test_purity('df')
        self.test_obj.purity_obj.test_relations.assert_called_with('df')

    def test_evaluate(self):
        # evaluate() validates the dataframe, then forwards it together with
        # the config's `modified` flag.
        self.test_obj.config_obj.modified = True
        self.test_obj.check_dataframe = mock.Mock(return_value='df2')
        self.test_obj.evaluation_obj.evaluate = mock.Mock()
        self.test_obj.evaluate('df')
        self.test_obj.check_dataframe.assert_called_with('df')
        self.test_obj.evaluation_obj.evaluate.assert_called_with('df2',
                                                                modified=True)

    def test_explain(self):
        # explain() validates the dataframe before delegating.
        self.test_obj.interpretability_obj.explain = mock.Mock()
        self.test_obj.check_dataframe = mock.Mock(return_value='df2')
        self.test_obj.explain('df')
        self.test_obj.check_dataframe.assert_called_with('df')
        self.test_obj.interpretability_obj.explain.assert_called_with('df2')

    def test_check_dataframe(self):
        # A non-None dataframe is returned unchanged.
        result = self.test_obj.check_dataframe('df')
        self.assertTrue(result == 'df')

    def test_check_dataframe_none(self):
        # A None dataframe triggers loading the test fold from disk.
        self.test_obj.define_file_folders = mock.Mock(return_value='folds/')
        self.test_obj.read_fold = mock.Mock(return_value='df')
        result = self.test_obj.check_dataframe(None)
        self.test_obj.define_file_folders.assert_called()
        self.test_obj.read_fold.assert_called_with('folds/')
        self.assertTrue(result == 'df')

    def test_define_file_folders(self):
        result = self.test_obj.define_file_folders()
        self.assertTrue(result == 'ind/data/soundcloud/folds/')

    def test_read_fold(self):
        # read_fold checks the fold file exists and parses it with pandas.
        self.test_obj.util_obj.check_file = mock.Mock(return_value=True)
        pd.read_csv = mock.Mock(return_value='df')
        result = self.test_obj.read_fold('f/')
        self.test_obj.util_obj.check_file.assert_called_with('f/test_1.csv')
        pd.read_csv.assert_called_with('f/test_1.csv', lineterminator='\n')
        self.assertTrue(result == 'df')
def test_suite():
    """Return the unittest suite built from ``AnalysisTestCase``."""
    return unittest.TestLoader().loadTestsFromTestCase(AnalysisTestCase)


if __name__ == '__main__':
    unittest.main()
| 2.75 | 3 |
cnn_pytroch/util.py | Izu97/Cat-vs-Dogs-Classifier-with-Pytroch-and-Tensorflow | 0 | 12761292 | <filename>cnn_pytroch/util.py
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.utils import shuffle
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from variables import*
def get_data():
    """Return ``(TrainData, TestData)``, building them from raw images on first run.

    When the cached ``.npy`` splits exist they are loaded directly; otherwise
    the cat/dog folders are scanned, preprocessed via ``read_image``, the
    combined data is shuffled, split by ``cutoff`` and cached to disk.
    """
    cached = os.path.exists(train_data) and os.path.exists(test_data)
    if cached:
        print("Data Loading !!!")
        TrainData = np.load(train_data, allow_pickle=True)
        TestData = np.load(test_data, allow_pickle=True)
        return TrainData, TestData

    print("Data preprocessing and Saving !!!")
    cat_dir = os.path.join(current_dir, cat_folder)
    dog_dir = os.path.join(current_dir, dog_folder)
    # Keep only .jpg/.png files in each class folder.
    cat_paths = [os.path.join(cat_dir, f) for f in os.listdir(cat_dir)
                 if os.path.splitext(f)[1] in ['.jpg', '.png']]
    dog_paths = [os.path.join(dog_dir, f) for f in os.listdir(dog_dir)
                 if os.path.splitext(f)[1] in ['.jpg', '.png']]

    cat_data = read_image(cat_paths, dog_paths)
    dog_data = read_image(cat_paths, dog_paths, cat=False)
    data = np.concatenate((cat_data, dog_data))
    np.random.shuffle(data)

    # Split by the configured train fraction and cache both parts.
    n_train = int((len(cat_data) + len(dog_data)) * cutoff)
    TrainData = data[:n_train]
    TestData = data[n_train:]
    np.save(train_data, TrainData)
    np.save(test_data, TestData)
    return TrainData, TestData
def read_image(cat_image_paths, dog_image_paths, cat=True):
    """Load, grayscale-resize and normalise the images of one class.

    Parameters
    ----------
    cat_image_paths : list of str
        Paths of the cat images.
    dog_image_paths : list of str
        Paths of the dog images.
    cat : bool
        When True read the cat images, otherwise the dog images.

    Returns
    -------
    np.ndarray
        Array of ``[image, label]`` pairs, where the label comes from the
        module-level ``encoder`` mapping.
    """
    if cat:
        image_paths = cat_image_paths
        anim = "cat"
    else:
        image_paths = dog_image_paths
        anim = "dog"
    data = []
    # Bug fix: the original iterated over ``cat_image_paths`` unconditionally,
    # so the dog branch re-read the cat files instead of the dog files.
    for path in image_paths:
        try:
            img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            img = cv2.resize(img, (input_size, input_size))
            img = img / 255.0
            data.append([np.array(img), encoder[anim]])
        except Exception:
            # Unreadable/corrupt files are skipped on purpose (best effort).
            pass
    return np.array(data)
| 2.59375 | 3 |
baghera_tool/cli.py | stracquadaniolab/baghera | 9 | 12761293 | <gh_stars>1-10
#!/usr/bin/env python
import logging
import argh
import sys
import os
import inspect
import datetime
from baghera_tool.logging import setup_console_logger
from baghera_tool import preprocess as pre
from baghera_tool import command as cmd
def main():
    """CLI entry point: configure console logging and dispatch sub-commands."""
    setup_console_logger()
    commands = [
        pre.create_files,
        pre.generate_snp_file,
        cmd.gene_heritability,
        cmd.gw_heritability,
    ]
    argh.dispatch_commands(commands)


if __name__ == "__main__":
    main()
| 1.625 | 2 |
vtelem/stream/__init__.py | vkottler/vtelem | 3 | 12761294 | """
vtelem - Some Queue wrappers and 'stream' utilities.
"""
# built-in
from queue import Empty, Queue
from typing import Any, Optional
QUEUE_TIMEOUT = 2


def queue_get(queue: Queue, timeout: int = QUEUE_TIMEOUT) -> Optional[Any]:
    """
    De-queue a single element, yielding None instead of raising when the
    wait times out.
    """
    try:
        item = queue.get(timeout=timeout)
    except Empty:
        item = None
    return item


def queue_get_none(queue: Queue, timeout: int = QUEUE_TIMEOUT) -> None:
    """Pop one element from the queue and assert that it is a literal None."""
    item = queue.get(timeout=timeout)
    assert item is None
| 2.765625 | 3 |
zeeguu/core/test/model_test_mixin.py | mircealungu/Zeeguu-API-2 | 8 | 12761295 | # import warnings
# warnings.filterwarnings("ignore", category=DeprecationWarning)
import requests_mock
import zeeguu.core.model
from faker import Faker
from unittest import TestCase
from zeeguu.core.test.test_data.mocking_the_web import mock_requests_get
class ModelTestMixIn(TestCase):
    """TestCase mixin giving each test a fresh zeeguu DB schema and mocked
    outgoing HTTP requests."""

    db = zeeguu.core.db

    def setUp(self):
        self.faker = Faker()
        self.db.create_all()

    def tearDown(self):
        super(ModelTestMixIn, self).tearDown()
        self.faker = None
        # drop_all can hang while a session is still open somewhere,
        # so close the session before dropping the schema.
        self.db.session.close()
        self.db.drop_all()

    def run(self, result=None):
        # The unit tests rely on locally stored HTML documents instead of
        # downloading them each time, so requests.get is mocked for the
        # duration of every test run.
        with requests_mock.Mocker() as mocker:
            mock_requests_get(mocker)
            super(ModelTestMixIn, self).run(result)
| 2.359375 | 2 |
arekit/contrib/networks/context/configurations/bilstm.py | nicolay-r/AREk | 18 | 12761296 | import tensorflow as tf
from arekit.contrib.networks.context.configurations.rnn import RNNConfig
from arekit.contrib.networks.tf_helpers.cell_types import CellTypes
class BiLSTMConfig(RNNConfig):
    """RNN configuration preset for a bidirectional LSTM context encoder."""

    __lstm_cell_initializer = tf.keras.initializers.glorot_normal()

    def __init__(self):
        parent = super(BiLSTMConfig, self)
        parent.__init__()
        # Preset hyper-parameters for the BiLSTM variant.
        parent.modify_hidden_size(128)
        parent.modify_l2_reg(0.001)
        parent.modify_dropout_rnn_keep_prob(0.8)
        parent.modify_cell_type(CellTypes.BasicLSTM)
        parent.modify_bias_initializer(tf.constant_initializer(0.1))
        parent.modify_weight_initializer(tf.contrib.layers.xavier_initializer())

    # region properties

    @property
    def LSTMCellInitializer(self):
        """Initializer used for the LSTM cell weights."""
        return self.__lstm_cell_initializer

    # endregion
| 2.15625 | 2 |
externals/skia/third_party/externals/sfntly/cpp/tools/utils.py | terrajobst/linux-packaging-skiasharp | 2,151 | 12761297 | <gh_stars>1000+
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limit
"""Common utility functions used by multiple scripts."""
import os
def GetFontList(path, exts, negate=False):
  """Recursively collect file paths under *path*, filtered by extension.

  negate = False: files whose name ends with one of the extensions in exts.
  negate = True:  files matching no extension in exts.

  Fixes: the original used ``map``/``reduce`` without importing ``reduce``
  (a NameError on Python 3) and shadowed the *path* parameter with the loop
  variable; ``any()`` expresses the same test and works on both versions.
  """
  paths = []
  for root, _dirs, files in os.walk(path):
    for name in files:
      matches = any(name[-len(ext):] == ext for ext in exts)
      # normal: include a file that matches at least one extension
      # negated: include a file that matches no extension
      if negate != matches:
        paths.append(os.path.join(root, name))
  return paths
def GetLevelList(path, max_level=1, negate=False):
  """Recursively gets the list of files that from path such that."""
  # negate = False: files that are at most |max_level|s deep.
  # negate = True: files that are more than |max_level|s deep.
  paths = []
  for entry in os.walk(path):
    root = entry[0]
    files = entry[2]
    for path in files:
      # NOTE(review): os.walk yields bare file names here, so
      # path.count(os.path.sep) is always 0 and the depth filter is a
      # no-op for negate=False (and empty for negate=True). Depth was
      # likely meant to be computed from root_path relative to the input
      # directory — confirm intent before changing behavior.
      root_path = os.path.join(root, path)
      level = path.count(os.path.sep)
      if (not negate and level <= max_level) or (negate and level > max_level):
        paths.append(root_path)
  return paths
def FixPath(path):
  """Return *path* guaranteed to end with a '/' separator."""
  return path if path[-1] == '/' else path + '/'
| 2.828125 | 3 |
codes/construct_d2q.py | jacky18008/TREC-CAsT-ASCFDA | 0 | 12761298 | <gh_stars>0
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration
import argparse
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Generate potential queries with t5.')
parser.add_argument('--collections', help='the path to the collections')
parser.add_argument('--output', help='the path to the rewritten collections')
args = parser.parse_args()

# doc2-t5-query: one doc2query-T5 model, moved to GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = T5Tokenizer.from_pretrained('castorini/doc2query-t5-base-msmarco')
model = T5ForConditionalGeneration.from_pretrained('castorini/doc2query-t5-base-msmarco')
model.to(device)

delimeters = [",", ".", "?", "!"]

with open(args.collections, "r") as fp_in:
    with open(args.output, "w") as fp_out:
        # Input format: one "<passage_id>\t<text>" record per line.
        for line in tqdm(fp_in):
            line = line.strip("\n")
            p_id, text = line.split("\t")
            # Normalise every sentence delimiter to a comma before splitting.
            # Bug fix: str.replace returns a new string; the original discarded
            # the result, so only ',' ever split the text.
            for d in delimeters:
                text = text.replace(d, ",")
            sentences = text.split(",")
            for sentence in sentences:
                input_ids = tokenizer.encode(sentence, return_tensors='pt').to(device)
                output = model.generate(
                    input_ids=input_ids,
                    max_length=64,
                    do_sample=True,
                    top_k=10,
                    num_return_sequences=1)[0]
                potential_query = tokenizer.decode(output, skip_special_tokens=True)
                fp_out.write(f"{p_id}\t{sentence}\t{potential_query}\n")
py_eye/script/stefy_script.py | robbisg/py_eye | 0 | 12761299 | #######################################################
# Copyright (c) 2013 <NAME>
#
# See the file license.txt for copying permission.
########################################################
#############################################################################################################
#############################################################################################################
#### Start from python console ####################
#### execfile('/home/robbis/development/eclipse/PyMVPA/src/stefy_script.py') #####
#############################################################################################################
#############################################################################################################
import os
import stefy
pathFile = '/home/robbis/development/eyeAnalysis/subj/'
# Collect the per-subject data files, excluding two specific subjects
# (Python 2 script; run via execfile as noted in the header).
listafile = os.listdir(pathFile);
listafile.sort()
listafile = [elem for elem in listafile if elem != 'Sub15.txt' and elem != 'Sub17.txt']
#datapath = '/home/robbis/development/eyeAnalysis/subj/Sub1.txt'
attpath = '/home/robbis/development/eyeAnalysis/stefy_attr.txt'
# Run stefy.analyzeFile on every remaining subject file and print the result.
for elem in listafile:
    data = pathFile;
    print '-------------Analyzing '+elem+' ----------------'
    result = stefy.analyzeFile(data, attpath, elem)
    print result
| 1.84375 | 2 |
tests/pipert/core/test_routine.py | Elon-Abulafia/PipeRT | 0 | 12761300 | <gh_stars>0
import pytest
import time
from torch.multiprocessing import Event
from pipert.core.routine import Routine, Events, State
from pipert.core.errors import NoRunnerException
from tests.pipert.core.utils.dummy_routine import DummyRoutine
class DummySleepRoutine(Routine):
    """Routine stub whose main_logic simply sleeps for a fixed interval,
    used to exercise the pacer timing tests."""

    def __init__(self, sleep_time, name=""):
        super().__init__(name)
        self.stop_event = Event()
        self.sleep_time = sleep_time

    def setup(self, *args, **kwargs):
        pass

    def main_logic(self, *args, **kwargs):
        time.sleep(self.sleep_time)
        return True

    def cleanup(self, *args, **kwargs):
        pass

    def does_routine_use_queue(self, queue):
        pass

    @staticmethod
    def get_constructor_parameters():
        pass
def dummy_before_handler(routine):
    # 'dummy' must not exist on the routine state yet; create it and
    # signal the routine to stop after this iteration.
    with pytest.raises(AttributeError):
        _ = routine.state.dummy
    routine.state.dummy = 666
    routine.stop_event.set()
def dummy_after_handler(routine):
    # Runs after dummy_before_handler; increments the marker it planted.
    assert routine.state.dummy == 666
    routine.state.dummy += 1
def dummy_before_stop_handler(routine):
    # Stops the routine as soon as it is invoked.
    routine.stop_event.set()
def test_routine_as_thread():
    # A routine configured as a thread starts and joins cleanly.
    r = DummyRoutine()
    e = Event()
    r.stop_event = e
    r.as_thread()
    r.start()
    e.set()
    r.runner.join()
def test_routine_as_process():
    # Same lifecycle, but with a process runner.
    r = DummyRoutine()
    e = Event()
    r.stop_event = e
    r.as_process()
    r.start()
    e.set()
    r.runner.join()
def test_routine_no_runner():
    # Starting without a runner raises; after as_thread() it must not.
    r = DummyRoutine(name="dummy")
    with pytest.raises(NoRunnerException):
        r.start()
    e = Event()
    r.stop_event = e
    r.as_thread()
    try:
        r.start()
    except NoRunnerException:
        pytest.fail("NoRunnerException was thrown...")
    e.set()
    r.runner.join()
def test_add_event_handler():
    # Handlers registered before/after the logic run in order: the BEFORE
    # handler plants state.dummy = 666 and the AFTER handler bumps it.
    r = DummyRoutine()
    e = Event()
    r.stop_event = e
    r.as_thread()
    r.add_event_handler(Events.BEFORE_LOGIC, dummy_before_handler)
    r.add_event_handler(Events.AFTER_LOGIC, dummy_after_handler)
    @r.on(Events.AFTER_LOGIC)
    def dummy_handler(_):
        pass
    r.start()
    r.runner.join()
    assert r.state.dummy == 667
def test_has_event_handler():
    # has_event_handler works both with and without specifying the event,
    # and add_event_handler rejects unknown event names.
    r = DummyRoutine()
    assert not r.has_event_handler(dummy_before_handler, Events.BEFORE_LOGIC)
    assert not r.has_event_handler(dummy_before_handler)
    r.add_event_handler(Events.BEFORE_LOGIC, dummy_before_handler)
    assert r.has_event_handler(dummy_before_handler, Events.BEFORE_LOGIC)
    assert r.has_event_handler(dummy_before_handler)
    with pytest.raises(ValueError):
        r.add_event_handler("wrong_event", dummy_before_handler)
def test_remove_event_handler():
    # Removing an unregistered handler raises; after add/remove the handler
    # is gone and removing again raises once more.
    r = DummyRoutine()
    with pytest.raises(ValueError):
        r.remove_event_handler(dummy_before_handler, Events.BEFORE_LOGIC)
    r.add_event_handler(Events.BEFORE_LOGIC, dummy_before_handler)
    assert r.has_event_handler(dummy_before_handler)
    r.remove_event_handler(dummy_before_handler, Events.BEFORE_LOGIC)
    assert not r.has_event_handler(dummy_before_handler)
    with pytest.raises(ValueError):
        r.remove_event_handler(dummy_before_handler, Events.BEFORE_LOGIC)
def test_pacer_faster_pace():
    # Logic takes ~1/60 s but the pacer throttles to 1 iteration/s; the
    # single iteration should therefore take about one second.
    # NOTE(review): wall-clock assertion rounded to one decimal — may be
    # flaky on heavily loaded machines.
    fast_routine = DummySleepRoutine(1 / 60)
    fast_routine.pace(1)
    fast_routine.add_event_handler(Events.AFTER_LOGIC,
                                   dummy_before_stop_handler,
                                   first=True)
    fast_routine.as_thread()
    start_time = time.time()
    fast_routine.start()
    fast_routine.runner.join()
    elapsed_time = time.time()  # end timestamp; duration computed below
    assert round(elapsed_time - start_time, 1) == round(1, 1)
def test_pacer_slower_pace():
    # Logic takes ~1 s while the pacer asks for 2 iterations/s: the pacer
    # cannot speed the logic up, so one iteration still takes ~1 s.
    slow_routine = DummySleepRoutine(1 / 1)
    slow_routine.pace(2)
    slow_routine.add_event_handler(Events.AFTER_LOGIC,
                                   dummy_before_stop_handler,
                                   first=True)
    slow_routine.as_thread()
    start_time = time.time()
    slow_routine.start()
    slow_routine.runner.join()
    elapsed_time = time.time()  # end timestamp; duration computed below
    assert round(elapsed_time - start_time, 1) == round(1 / 1, 1)
| 2.03125 | 2 |
hata/discord/player.py | Zeref-Draganeel/hata | 0 | 12761301 | <reponame>Zeref-Draganeel/hata
# -*- coding: utf-8 -*-
__all__ = ('AudioSource', 'DownloadError', 'LocalAudio', 'YTAudio')
import os, sys, subprocess, shlex
from time import perf_counter
from audioop import mul as audio_mul
from pathlib import Path
from ..backend.utils import alchemy_incendiary
from ..backend.futures import Task, Event, sleep, CancelledError
from .core import KOKORO
from .opus import FRAME_LENGTH, FRAME_SIZE, SAMPLES_PER_FRAME
# Seconds per audio frame (FRAME_LENGTH is in milliseconds).
PLAYER_DELAY = FRAME_LENGTH/1000.0
del FRAME_LENGTH
# Default audio transcoder executable looked up on PATH.
DEFAULT_EXECUTABLE = 'ffmpeg'
if os.name == 'nt':
    # On Windows, hide the console window of the spawned ffmpeg process.
    SUBPROCESS_STARTUP_INFO = subprocess.STARTUPINFO()
    SUBPROCESS_STARTUP_INFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    SUBPROCESS_STARTUP_INFO.wShowWindow = subprocess.SW_HIDE
else:
    SUBPROCESS_STARTUP_INFO = None
# Extra ffmpeg flags used for streamed (network) sources.
STREAM_OPTIONS = (
    '-reconnect', '1',
    # '-reconnect_streamed', '1',
    # '-reconnect_delay_max', '3',
)
class AudioSource:
    """
    Abstract base type of all audio sources.

    Class Attributes
    ----------------
    NEEDS_ENCODE : `bool` = `True`
        Whether the data returned by the source still has to be opus encoded.
    REPEATABLE : `bool` = `False`
        Whether the source can be replayed after being exhausted once.
    """
    __slots__ = ()

    NEEDS_ENCODE = True
    REPEATABLE = False

    async def read(self):
        """
        Reads 20ms of audio data; `None` signals end of stream.

        Subclasses are expected to override this.

        This method is a coroutine.

        Returns
        -------
        audio_data : `bytes` or `None`
        """
        return None

    async def cleanup(self):
        """
        Releases resources held by the source. Subclasses may override it.

        This method is a coroutine.
        """
        pass

    def __del__(self):
        """Schedules ``.cleanup`` in case it was never awaited explicitly."""
        Task(self.cleanup(), KOKORO)

    @property
    def title(self):
        """
        The source's title; the base implementation always yields an empty
        string.

        Returns
        -------
        title : `str`
        """
        return ''

    @property
    def path(self):
        """
        The source's file system path; the base implementation always yields
        `None`.

        Returns
        -------
        path : `None`
        """
        return None

    async def postprocess(self):
        """
        Hook invoked right before the source's audio is played.

        This method is a coroutine.
        """
        pass
class LocalAudio(AudioSource):
    """
    Represents a ffmpeg pcm audio.
    You must have the ffmpeg or avconv executable in your path environment variable in order for this to work.
    Attributes
    ----------
    _process_args : `tuple` ((`list` of `str`), (`None` or `file-like`))
        Parameters and the stdin used to open the postprocess when postprocess happens.
    _stdout : `_io.BufferedReader`
        Stdout of `.process`.
    path : `str` or `None`
        The audio source's path if applicable. Defaults to `None`.
    process : `subprocess.Popen`
        The ffmpeg or the avconv subprocess.
    title : `str`
        The audio source's title if applicable. Defaults to empty string.
    Class Attributes
    ----------------
    NEEDS_ENCODE : `bool` = `True`
        Whether the source is not opus encoded.
    REPEATABLE : `bool` = `True`
        Whether the source can be repeated after it is exhausted once.
    """
    REPEATABLE = True
    @staticmethod
    def _create_process_preprocess(source, executable, pipe, before_options, options):
        """
        Creates a subprocess's args to open.
        Parameters
        ----------
        source : `str`, `Path` or `file-like`
            The source audio file's path or `file-like` if `pipe` is `True`.
        executable : `str`
            The executable's name to use. Defaults to `'ffmpeg'`.
        pipe : `bool`
            Whether the source is passed to stdin.
        before_options : `str` or (`iterable` of `str`)
            Extra parameters passed before the `-i` flag.
        options : `str` or (`iterable` of `str`)
            Extra parameters passed after the `-i` flag.
        Returns
        -------
        executable : `str`
            The executable's name.
        args : `list` of `str`
            Subprocess parameters.
        stdin : `None or `file-like`
            Input for the postprocess.
        Raises
        ------
        TypeError
            - If `pipe` was given as `True` meanwhile `source` was not given as a `file-like` supporting `.fileno()`
                method.
            - If `pipe` was given as `False`, meanwhile `source` was not given as `str` or `Path` instance.
        ValueError
            - Executable as not found.
            - Popen failed.
        """
        if pipe:
            # Validate up front that the file-like source really has a usable
            # fileno(), so the failure happens here and not inside Popen.
            try:
                fileno_function = source.__class__.fileno
            except AttributeError as err:
                raise TypeError('The given `source` not supports `.fileno()` method') from err
            try:
                fileno_function(source)
            except TypeError as err:
                raise TypeError('The given `source` not supports `.fileno()` method') from err
        else:
            source_type = source.__class__
            if source_type is str:
                pass
            elif issubclass(source_type, Path):
                source = str(source)
            elif issubclass(source_type, str):
                # str subclasses are normalized to plain str.
                source = str(source)
            else:
                raise TypeError('The given `source` should be given as `str` or as `Path` instance, got '
                    f'{source_type}.')
        args = []
        if (before_options is not None):
            if isinstance(before_options, str):
                before_options = shlex.split(before_options)
            args.extend(before_options)
        # Output: signed 16-bit little-endian pcm, 48kHz, stereo, on stdout.
        args.append('-i')
        args.append('-' if pipe else source)
        args.append('-f')
        args.append('s16le')
        args.append('-ar')
        args.append('48000')
        args.append('-ac')
        args.append('2')
        args.append('-loglevel')
        args.append('panic')
        if (options is not None):
            if isinstance(options, str):
                options = shlex.split(options)
            args.extend(options)
        args.append('pipe:1')
        return executable, args, (source if pipe else None)
    async def postprocess(self):
        """
        Creates the process of the audio player.
        This method is a coroutine.
        Raises
        ------
        ValueError
            - Executable as not found.
            - Popen failed.
        """
        process = self.process
        if process is None:
            # Lazily spawn the transcoder the first time the source is played.
            executable, args, stdin = self._process_args
            try:
                process = await KOKORO.subprocess_exec(executable, *args, stdin=stdin, stdout=subprocess.PIPE,
                    startup_info=SUBPROCESS_STARTUP_INFO)
            except FileNotFoundError:
                raise ValueError(f'{executable!r} was not found.') from None
            except subprocess.SubprocessError as err:
                raise ValueError(f'Opening subprocess failed: {err.__class__.__name__}: {err}') from err
            self.process = process
            self._stdout = process.stdout
    __slots__ = ('_process_args', '_stdout', 'path', 'process', 'title', )
    # use __new__, so __del__ wont run
    async def __new__(cls, source, executable=DEFAULT_EXECUTABLE, pipe=False, before_options=None,
            options=None, title=None):
        """
        Creates a new ``LocalAudio`` instance.
        This method is a coroutine.
        Parameters
        ----------
        source : `str` or `file-like`
            The source audio file's path or `file-like` if `pipe` is `True`.
        executable : `str`, Optional
            The executable's name to use. Defaults to `'ffmpeg'`.
        pipe : `bool`, Optional
            Whether the source is passed to stdin. Defaults to `False`
        before_options : `str` or (`iterable` of `str`), Optional
            Extra parameters passed before the `-i` flag.
        options : `str` or (`iterable` of `str`), Optional
            Extra parameters passed after the `-i` flag.
        Returns
        -------
        self : ``LocalAudio``
        Raises
        ------
        TypeError
            - If `pipe` was given as `True` meanwhile `source` was not given as a `file-like` supporting `.fileno()`
                method.
            - If `pipe` was given as `False`, meanwhile `source` was not given as `str` or `Path` instance.
        """
        args = cls._create_process_preprocess(source, executable, pipe, before_options, options)
        self = object.__new__(cls)
        self._process_args = args
        self.process = None
        self._stdout = None
        if pipe:
            # Piped sources have no path; derive a title from the file-like's
            # `.name` attribute when available.
            path = None
            if title is None:
                title = getattr(source, 'name', None)
                if title is None:
                    title = ''
                else:
                    title = os.path.splitext(title)[0].replace('_', ' ')
        else:
            path = source
            if title is None:
                # Human-friendly title from the file name, without extension.
                title = os.path.splitext(os.path.basename(path))[0].replace('_', ' ')
        self.path = path
        self.title = title
        return self
    async def read(self):
        """
        Reads 20ms audio data.
        Indicates end of stream by returning zero `None`.
        This method is a coroutine.
        Returns
        -------
        audio_data : `bytes` or `None`
        """
        stdout = self._stdout
        if stdout is None:
            result = None
        else:
            try:
                result = await stdout.read(FRAME_SIZE)
            except (CancelledError, ConnectionError):
                result = None
            else:
                # A short read means the stream is exhausted; treat it as eof.
                if len(result) != FRAME_SIZE:
                    result = None
        return result
    async def cleanup(self):
        """
        Closes ``.process``.
        This method is a coroutine.
        """
        process = self.process
        if process is None:
            return
        await process.kill()
        if process.poll() is None:
            await process.communicate()
        self._stdout = None
        self.process = None
# youtube_dl is an optional dependency; when missing, the related names are
# exported as `None` so importers can feature-detect them.
try:
    import youtube_dl
except ImportError:
    youtube_dl = None
    DownloadError = None
    YTAudio = None
else:
    from youtube_dl.utils import DownloadError
    youtube_dl.utils.bug_reports_message = lambda: ''
    # Shared downloader instance configured for best-audio, single items.
    YTdl = youtube_dl.YoutubeDL({
        'format'            : 'bestaudio/best',
        'outtmpl'           : '%(extractor)s-%(id)s-%(title)s.%(ext)s',
        'restrictfilenames' : True,
        'noplaylist'        : True,
        'nocheckcertificate': True,
        'ignoreerrors'      : False,
        'logtostderr'       : False,
        'quiet'             : True,
        'no_warnings'       : True,
        'default_search'    : 'auto',
        'source_address'    : '0.0.0.0',
    })
    class YTAudio(LocalAudio):
        """
        Represents an audio sourced downloaded from youtube.
        You must have the ffmpeg or avconv executable in your path environment variable in order for this to work.
        Attributes
        ----------
        _process_args : `tuple` ((`list` of `str`), (`None` or `file-like`))
            Parameters and the stdin used to open the postprocess when postprocess happens.
        _stdout : `_io.BufferedReader`
            Stdout of `.process`.
        path : `str` or `None`
            The audio source's path if applicable. Defaults to `None`.
        process : `subprocess.Popen`
            The ffmpeg or the avconv subprocess.
        title : `str`
            The audio source's title if applicable. Defaults to empty string.
        url : `str`
            The source url of the downloaded audio.
        Class Attributes
        ----------------
        NEEDS_ENCODE : `bool` = `True`
            Whether the source is not opus encoded.
        REPEATABLE : `bool` = `True`
            Whether the source can be repeated after it is exhausted once.
        """
        __slots__ = ('url', )
        # NOTE: declared as a staticmethod that receives `cls` explicitly,
        # because it is invoked through an executor helper in `__new__`.
        @staticmethod
        def _preprocess(cls, url, stream):
            """
            Downloads the audio source by the given url or title.
            This function runs inside of an executor thread.
            Parameters
            ----------
            url : `str`
            Returns
            -------
            path : `str`
                The title of the downloaded audio.
            data : `dict` of (`str`, `Any`)
                All extracted data by YTDL.
            args : `list` of `str`
                Subprocess parameters.
            Raises
            ------
            DownloadError
                Downloading the audio source failed.
            """
            data = YTdl.extract_info(url, download=(not stream))
            if 'entries' in data: #playlist
                data = data['entries'][0]
            if stream:
                # Streamed: feed ffmpeg the remote url with reconnect flags.
                path = data['url']
                before_options = STREAM_OPTIONS
            else:
                # Downloaded: ffmpeg reads the local file instead.
                path = YTdl.prepare_filename(data)
                before_options = None
            args = cls._create_process_preprocess(path, DEFAULT_EXECUTABLE, False, before_options, ('-vn',))
            return path, data, args
        async def __new__(cls, url, stream=True):
            """
            Creates a new ``YTAudio`` instance.
            This method is a coroutine.
            Parameters
            ----------
            url : `str`
                The url or the title of the video.
            stream : `bool`
                Whether the audio should be streamed.
            Returns
            -------
            self : ``YTAudio``
            Raises
            ------
            DownloadError
                Downloading the audio source failed.
            PermissionError
                The given file started to be played at the same time by an other player as well.
            TypeError
                - If `pipe` was given as `True` meanwhile `source` was not given as a `file-like` supporting `.fileno()`
                    method.
                - If `pipe` was given as `False`, meanwhile `source` was not given as `str` or `Path` instance.
            """
            # Blocking ytdl extraction is pushed off the event loop thread.
            path, data, args = await KOKORO.run_in_executor(alchemy_incendiary(cls._preprocess,(cls, url, stream)))
            # Create self only at the end, so the `__del__` wont pick it up
            self = object.__new__(cls)
            self._process_args = args
            self.process = None
            self._stdout = None
            self.path = path
            self.title = data.get('title', None)
            self.url = data.get('url', None)
            return self
class AudioPlayer:
"""
Sends voice data through the voice client's socket.
Attributes
----------
client : ``VoiceClient``
The voice client of audio player.
done : `bool`
Whether the audio player finished playing it's source.
resumed_waiter : `threading.Event`
Indicates whether the the audio player is not paused.
source : ``AudioSource`` instance
The audio source what the player reads each 20 ms.
should_update : `bool`
Whether the voice client should update itself.
task : `None` or ``Task``
Audio reader task. Set as `None` if the reader is stopped.
"""
__slots__ = ('client', 'done', 'resumed_waiter', 'should_update', 'source', 'task')
def __init__(self, voice_client, source):
"""
Creates an starts the audio player.
Parameters
----------
voice_client : ``VoiceClient``
The voice client of audio player.
source : ``AudioSource`` instance
The audio source what the player reads each 20 ms.
"""
self.source = source
self.client = voice_client
resumed_waiter = Event(KOKORO)
resumed_waiter.set() # we are not paused
self.resumed_waiter = resumed_waiter
self.should_update = True
self.done = False
self.task = Task(self.run(), KOKORO)
async def run(self):
"""
The main runner of ``AudioPlayer``. Is instantly started inside of a ``Task`` as the player is created.
This method is a coroutine.
"""
voice_client = self.client
start = perf_counter()
loops = 0
source = None
try:
while True:
if self.should_update:
source = await self.update(source)
if source is None:
break
start = perf_counter()
loops = 0
continue
# are we disconnected from voice?
if not voice_client.connected.is_set():
await voice_client.connected
start = perf_counter()
loops = 0
continue
loops += 1
data = await source.read()
if data is None:
self.source = None
await source.cleanup()
self.pause()
async with voice_client.lock:
await voice_client.call_after(voice_client, source)
source = None
self.should_update = True # safety first
continue
sequence = voice_client._sequence
if sequence == 65535:
sequence = 0
else:
sequence += 1
voice_client._sequence = sequence
if source.NEEDS_ENCODE:
pref_volume = voice_client._pref_volume
if (pref_volume != 1.0):
data = audio_mul(data, 2, pref_volume)
data = voice_client._encoder.encode(data)
header = b''.join([
b'\x80x',
voice_client._sequence.to_bytes(2, 'big'),
voice_client._timestamp.to_bytes(4, 'big'),
voice_client._audio_source.to_bytes(4, 'big'),
])
nonce = header+b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
packet = bytearray(header)+voice_client._secret_box.encrypt(bytes(data), nonce).ciphertext
voice_client.send_packet(packet)
timestamp = voice_client._timestamp+SAMPLES_PER_FRAME
if timestamp > 4294967295:
timestamp = 0
voice_client._timestamp = timestamp
delay = PLAYER_DELAY+((start+PLAYER_DELAY*loops)-perf_counter())
if delay < 0.0:
continue
await sleep(delay, KOKORO)
except BaseException as err:
if voice_client.player is self:
voice_client.player = None
self.done = True
self.source = None
if (source is not None):
await source.cleanup()
source = None
if isinstance(err, CancelledError):
return
await KOKORO.render_exc_async(err, before=[
'Exception occurred at \n',
repr(self),
'\n',
])
else:
if voice_client.player is self:
voice_client.player = None
finally:
self.task = None
# Force resume if applicable.
if voice_client.player is None:
queue = voice_client.queue
if queue:
voice_client.player = type(self)(voice_client, queue.pop(0))
    async def update(self, actual_source):
        """
        Updates the player if ``.should_update`` is set as `True`.
        Waits for its resumed waiter to be set if paused. If the voice player's source is updated, then initializes it
        as well and closes the old one too.
        This method is a coroutine.
        Parameters
        ----------
        actual_source : `None` or ``AudioSource`` instance
            The actual audio source of the player.
        Returns
        -------
        new_source : `None` or ``AudioSource`` instance
            New source of the player to play.
            Can be same as the `actual_source`.
        """
        resumed_waiter = self.resumed_waiter
        if not resumed_waiter.is_set():
            # Paused: block here until ``.resume`` / ``.stop`` / ``.set_source``
            # sets the waiter again.
            await resumed_waiter
        self.should_update = False
        new_source = self.source
        if (new_source is None):
            # No source queued any more.
            if (self.client.player is not self):
                # We were replaced as the active player; mark ourselves done.
                self.done = True
                return None
            if (actual_source is not None):
                await actual_source.cleanup()
            return None
        if (new_source is actual_source):
            # Same object: nothing to re-initialize.
            return actual_source
        if (actual_source is not None):
            # Close the old source before switching to the new one.
            await actual_source.cleanup()
        await new_source.postprocess()
        return new_source
def pause(self):
"""
Pauses the player.
"""
self.resumed_waiter.clear()
self.should_update = True
def resume(self):
"""
Resumes the player if paused.
"""
resumed_waiter = self.resumed_waiter
if not resumed_waiter.is_set():
resumed_waiter.set()
self.should_update = True
def stop(self):
"""
Stops the player if running.
"""
if self.done:
return
self.done = True
task = self.task
if (task is not None):
task.cancel()
self.should_update = True
self.resumed_waiter.set()
def set_source(self, source):
"""
Sets they player source.
Parameters
----------
source : `None` or ``AudioSource`` instance
The new source of the player.
"""
self.source = source
self.should_update = True
resumed_waiter = self.resumed_waiter
if not resumed_waiter.is_set():
resumed_waiter.set()
| 2.0625 | 2 |
players/expectimax_weighted_probabilities_with_filter_player.py | nazaruka/Catan-AI | 9 | 12761302 | <reponame>nazaruka/Catan-AI
from players.expectimax_weighted_probabilities_player import ExpectimaxWeightedProbabilitiesPlayer
from players.filters import create_bad_robber_placement_and_monte_carlo_filter
class ExpectimaxWeightedProbabilitiesWithFilterPlayer(ExpectimaxWeightedProbabilitiesPlayer):
    """Expectimax player that prunes moves with a robber-placement + Monte-Carlo filter."""

    def __init__(self, seed=None, timeout_seconds=5, branching_factor=387):
        # Build the move filter first; it needs a back-reference to this player.
        move_filter = create_bad_robber_placement_and_monte_carlo_filter(
            seed, self, branching_factor)
        super().__init__(seed=seed,
                         timeout_seconds=timeout_seconds,
                         filter_moves=move_filter)
| 1.992188 | 2 |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/config/urls.py | ghn/django-template | 0 | 12761303 | import django.views.static
from django.conf import settings
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import include, path
from django.views.generic import TemplateView
# NOTE(review): this is a cookiecutter template; the `{%- ... %}` tags are
# rendered away at project generation time, so the file is not valid Python
# until rendered.
admin.autodiscover()
urlpatterns = [
    # Placeholder landing page.
    path('', TemplateView.as_view(template_name='base.html')),
    path('admin/', admin.site.urls),
    {%- if cookiecutter.use_djangocms == 'y' %}
    # django CMS catch-all; kept last so it does not shadow other routes.
    path('', include('cms.urls')),
    {%- endif %}
]
if settings.DEBUG:
    import debug_toolbar
    # Serve media/static directly in development only; in production the
    # web server should handle these.
    urlpatterns = [
        path('media/<path:path>/', django.views.static.serve, {
            'document_root': settings.MEDIA_ROOT, 'show_indexes': True
        }),
        path('__debug__/', include(debug_toolbar.urls)),
    ] + staticfiles_urlpatterns() + urlpatterns
| 1.867188 | 2 |
2018/day12.py | JonSn0w/advent-of-code | 1 | 12761304 | <filename>2018/day12.py
"""Advent of Code 2018, day 12: plant-pot cellular automaton.

Reads the puzzle input from stdin: an initial state line, a blank line,
then one ``LLCRR => N`` rule per line.
"""
from sys import stdin


def solve(initial, rules, generations):
    """Return the sum of pot numbers holding a plant after *generations*.

    Parameters
    ----------
    initial : str
        Initial state, e.g. ``"#..#.#"`` (``#`` = plant, ``.`` = empty).
    rules : dict
        Maps a 5-character "01010"-style neighbourhood key to a bool telling
        whether the centre pot holds a plant in the next generation.
    generations : int
        Number of generations to simulate.
    """
    # Pad generously on both sides: the pattern can spread at most one pot
    # per generation in each direction.
    pad = 2 * generations + 4
    state = [0] * pad + [int(c == "#") for c in initial] + [0] * pad
    for _ in range(generations):
        nxt = list(state)
        for i in range(2, len(state) - 2):
            key = "".join(map(str, state[i - 2:i + 3]))
            # Unknown neighbourhoods produce no plant.
            nxt[i] = int(rules.get(key, False))
        state = nxt
    # Pot number 0 sits at index ``pad``.
    return sum(i - pad for i, pot in enumerate(state) if pot)


def _parse_rules(lines):
    """Parse ``"..#.. => #"`` rule lines into a {key: bool} mapping."""
    rules = {}
    for line in lines:
        patt, out = line.strip().split(" => ")
        key = "".join("1" if c == "#" else "0" for c in patt)
        rules[key] = out == "#"
    return rules


if __name__ == "__main__":
    initial = stdin.readline().split(" ")[-1].strip()
    stdin.readline()  # skip the blank separator line
    rules = _parse_rules(stdin.readlines())
    total = solve(initial, rules, 102)
    # Part 2: after ~100 generations the pattern settles into a shape whose
    # score grows by a constant 46 per generation (input-specific constant).
    print((50000000000 - 102) * 46 + total)
test/test_quic.py | artem-belov/vpp | 0 | 12761305 | <reponame>artem-belov/vpp
#!/usr/bin/env python
""" Vpp QUIC tests """
import unittest
import os
import subprocess
import signal
from framework import VppTestCase, VppTestRunner, running_extended_tests, \
Worker
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
class QUICAppWorker(Worker):
    """ QUIC Test Application Worker.

    Runs one ``quic_echo`` binary from the VPP build tree in a subprocess.
    """
    def __init__(self, build_dir, appname, args, logger, env=None):
        # Avoid a mutable default argument: a shared ``{}`` default would be
        # reused (and could be mutated) across instances.
        if env is None:
            env = {}
        app = "%s/vpp/bin/%s" % (build_dir, appname)
        self.args = [app] + args
        super(QUICAppWorker, self).__init__(self.args, logger, env)
class QUICTestCase(VppTestCase):
    """ QUIC Test Case.

    Shared plumbing for the QUIC transfer tests: two loopback interfaces in
    separate FIB tables / app namespaces, plus helpers to run the echo
    client/server either inside VPP (debug CLI) or as external binaries.
    """
    @classmethod
    def setUpClass(cls):
        super(QUICTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(QUICTestCase, cls).tearDownClass()

    def setUp(self):
        """Locate the VPP build tree and enable the session layer."""
        var = "VPP_BUILD_DIR"
        self.build_dir = os.getenv(var, None)
        if self.build_dir is None:
            raise Exception("Environment variable `%s' not set" % var)
        self.vppDebug = 'vpp_debug' in self.build_dir
        self.timeout = 20
        self.pre_test_sleep = 0.3
        self.post_test_sleep = 0.3
        self.vapi.session_enable_disable(is_enabled=1)

    def tearDown(self):
        self.vapi.session_enable_disable(is_enabled=0)

    def thru_host_stack_ipv4_setup(self):
        """Set up two loopbacks in distinct tables with inter-table routes."""
        super(QUICTestCase, self).setUp()
        self.create_loopback_interfaces(2)
        self.uri = "quic://%s/1234" % self.loop0.local_ip4
        common_args = ["uri", self.uri, "fifo-size", "64"]
        self.server_echo_test_args = common_args + ["appns", "server"]
        self.client_echo_test_args = common_args + ["appns", "client",
                                                    "test-bytes"]
        table_id = 1
        for i in self.lo_interfaces:
            i.admin_up()
            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()
            i.set_table_ip4(table_id)
            i.config_ip4()
            table_id += 1
        # Configure namespaces
        self.vapi.app_namespace_add_del(namespace_id=b"server",
                                        sw_if_index=self.loop0.sw_if_index)
        self.vapi.app_namespace_add_del(namespace_id=b"client",
                                        sw_if_index=self.loop1.sw_if_index)
        # Add inter-table routes
        self.ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
                                 [VppRoutePath("0.0.0.0",
                                               0xffffffff,
                                               nh_table_id=2)], table_id=1)
        self.ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
                                 [VppRoutePath("0.0.0.0",
                                               0xffffffff,
                                               nh_table_id=1)], table_id=2)
        self.ip_t01.add_vpp_config()
        self.ip_t10.add_vpp_config()
        self.logger.debug(self.vapi.cli("show ip fib"))

    def thru_host_stack_ipv4_tear_down(self):
        # Delete inter-table routes
        self.ip_t01.remove_vpp_config()
        self.ip_t10.remove_vpp_config()
        for i in self.lo_interfaces:
            i.unconfig_ip4()
            i.set_table_ip4(0)
            i.admin_down()

    def start_internal_echo_server(self, args):
        """Start the in-VPP echo server through the debug CLI."""
        error = self.vapi.cli("test echo server %s" % ' '.join(args))
        if error:
            self.logger.critical(error)
            self.assertNotIn("failed", error)

    def start_internal_echo_client(self, args):
        """Start the in-VPP echo client through the debug CLI (blocking)."""
        error = self.vapi.cli("test echo client %s" % ' '.join(args))
        if error:
            self.logger.critical(error)
            self.assertNotIn("failed", error)

    def internal_ipv4_transfer_test(self, server_args, client_args):
        self.start_internal_echo_server(server_args)
        self.start_internal_echo_client(client_args)

    def start_external_echo_server(self, args):
        self.worker_server = QUICAppWorker(self.build_dir, "quic_echo",
                                           args, self.logger)
        self.worker_server.start()

    def start_external_echo_client(self, args):
        # Add as a single element; `+= "use-svm-api"` would extend the list
        # with the string's individual characters.
        self.client_echo_test_args += ["use-svm-api"]
        self.worker_client = QUICAppWorker(self.build_dir, "quic_echo",
                                           args, self.logger)
        self.worker_client.start()
        self.worker_client.join(self.timeout)
        try:
            self.validateExternalTestResults()
        except Exception as error:
            self.fail("Failed with %s" % error)

    def external_ipv4_transfer_test(self, server_args, client_args):
        self.start_external_echo_server(server_args)
        self.sleep(self.pre_test_sleep)
        self.start_external_echo_client(client_args)
        self.sleep(self.post_test_sleep)

    def validateExternalTestResults(self):
        """Reap the external workers and check the client's exit code."""
        if os.path.isdir('/proc/{}'.format(self.worker_server.process.pid)):
            self.logger.info("Killing server worker process (pid %d)" %
                             self.worker_server.process.pid)
            os.killpg(os.getpgid(self.worker_server.process.pid),
                      signal.SIGTERM)
            self.worker_server.join()
        self.logger.info("Client worker result is `%s'" %
                         self.worker_client.result)
        error = False
        if self.worker_client.result is None:
            try:
                error = True
                self.logger.error(
                    "Timeout: %ss! Killing client worker process (pid %d)" %
                    (self.timeout, self.worker_client.process.pid))
                os.killpg(os.getpgid(self.worker_client.process.pid),
                          signal.SIGKILL)
                self.worker_client.join()
            except OSError:
                self.logger.debug(
                    "Couldn't kill client worker process")
                raise
        if error:
            # Was a NameError: `timeout` is only defined on self.
            raise Exception(
                "Timeout! Client worker did not finish in %ss" % self.timeout)
        self.assert_equal(self.worker_client.result, 0,
                          "Binary test return code")
class QUICInternalEchoIPv4TestCase(QUICTestCase):
    """ QUIC Internal Echo IPv4 Transfer Test Cases """
    @classmethod
    def setUpClass(cls):
        super(QUICInternalEchoIPv4TestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(QUICInternalEchoIPv4TestCase, cls).tearDownClass()
    def setUp(self):
        # Base setUp plus the two-loopback/two-namespace topology.
        super(QUICInternalEchoIPv4TestCase, self).setUp()
        self.thru_host_stack_ipv4_setup()
    def tearDown(self):
        super(QUICInternalEchoIPv4TestCase, self).tearDown()
        self.thru_host_stack_ipv4_tear_down()
    def show_commands_at_teardown(self):
        # Extra diagnostics dumped by the framework on failure/teardown.
        self.logger.debug(self.vapi.cli("show session verbose 2"))
    @unittest.skipUnless(running_extended_tests, "part of extended tests")
    def test_quic_internal_transfer(self):
        """ QUIC internal echo client/server transfer """
        self.internal_ipv4_transfer_test(self.server_echo_test_args,
                                         self.client_echo_test_args +
                                         ["no-output", "mbytes", "10"])
class QUICInternalSerialEchoIPv4TestCase(QUICTestCase):
    """ QUIC Internal Serial Echo IPv4 Transfer Test Cases """
    @classmethod
    def setUpClass(cls):
        super(QUICInternalSerialEchoIPv4TestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(QUICInternalSerialEchoIPv4TestCase, cls).tearDownClass()
    def setUp(self):
        # Base setUp plus the two-loopback/two-namespace topology.
        super(QUICInternalSerialEchoIPv4TestCase, self).setUp()
        self.thru_host_stack_ipv4_setup()
    def tearDown(self):
        super(QUICInternalSerialEchoIPv4TestCase, self).tearDown()
        self.thru_host_stack_ipv4_tear_down()
    def show_commands_at_teardown(self):
        self.logger.debug(self.vapi.cli("show session verbose 2"))
    @unittest.skipUnless(running_extended_tests, "part of extended tests")
    def test_quic_serial_internal_transfer(self):
        """ QUIC serial internal echo client/server transfer """
        client_args = (self.client_echo_test_args +
                       ["no-output", "mbytes", "10"])
        self.internal_ipv4_transfer_test(self.server_echo_test_args,
                                         client_args)
        # Reconnect the client several times against the same server to
        # exercise serial session setup/teardown.
        self.start_internal_echo_client(client_args)
        self.start_internal_echo_client(client_args)
        self.start_internal_echo_client(client_args)
        self.start_internal_echo_client(client_args)
class QUICInternalEchoIPv4MultiStreamTestCase(QUICTestCase):
    """ QUIC Internal Echo IPv4 Multi-Stream Transfer Test Cases """
    @classmethod
    def setUpClass(cls):
        super(QUICInternalEchoIPv4MultiStreamTestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(QUICInternalEchoIPv4MultiStreamTestCase, cls).tearDownClass()
    def setUp(self):
        # Base setUp plus the two-loopback/two-namespace topology.
        super(QUICInternalEchoIPv4MultiStreamTestCase, self).setUp()
        self.thru_host_stack_ipv4_setup()
    def tearDown(self):
        super(QUICInternalEchoIPv4MultiStreamTestCase, self).tearDown()
        self.thru_host_stack_ipv4_tear_down()
    def show_commands_at_teardown(self):
        self.logger.debug(self.vapi.cli("show session verbose 2"))
    @unittest.skipUnless(running_extended_tests, "part of extended tests")
    def test_quic_internal_multistream_transfer(self):
        """ QUIC internal echo client/server multi-stream transfer """
        # 10 parallel QUIC streams, 1 MB each.
        self.internal_ipv4_transfer_test(self.server_echo_test_args,
                                         self.client_echo_test_args +
                                         ["quic-streams", "10",
                                          "mbytes", "1",
                                          "no-output"])
class QUICExternalEchoIPv4TestCase(QUICTestCase):
    """ QUIC External Echo IPv4 Transfer Test Cases """
    @classmethod
    def setUpClass(cls):
        super(QUICExternalEchoIPv4TestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(QUICExternalEchoIPv4TestCase, cls).tearDownClass()
    def setUp(self):
        # Base setUp plus the two-loopback/two-namespace topology.
        super(QUICExternalEchoIPv4TestCase, self).setUp()
        self.thru_host_stack_ipv4_setup()
    def tearDown(self):
        super(QUICExternalEchoIPv4TestCase, self).tearDown()
        self.thru_host_stack_ipv4_tear_down()
    def show_commands_at_teardown(self):
        self.logger.debug(self.vapi.cli("show session verbose 2"))
    @unittest.skipUnless(running_extended_tests, "part of extended tests")
    def test_quic_external_transfer(self):
        """ QUIC external echo client/server transfer """
        # External binaries talk to VPP through its API socket.
        self.external_ipv4_transfer_test(self.server_echo_test_args +
                                         ["socket-name", self.api_sock],
                                         self.client_echo_test_args +
                                         ["socket-name", self.api_sock,
                                          "mbytes", "10"])
# Run with VPP's test runner when executed directly.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
| 2.0625 | 2 |
aiomadeavr/avr.py | frawau/aiomadeavr | 0 | 12761306 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# Inspired by https://github.com/silvester747/aio_marantz_avr
#
# This is to control Denon/Marantz AVR devices
#
# Copyright (c) 2020 <NAME>
#
# Note large part of this code was taken from scapy and other opensource software
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
"""Control of an AVR over Telnet."""
import asyncio
import logging
import re
from enum import Enum
from typing import Any, List, Mapping, Optional, Callable
from .enums import *
# Some replacement for the surround sound format
# (pattern, replacement) pairs applied to the device's raw surround-format
# string to produce a human readable name.
SSTRANSFORM = [
    ("Audio-", " "),
    ("Dd", "Dolby Digital "),
    ("Hd", "HD "),
    ("DD", "Dolby Digital "),
    ("Dts", "DTS"),
    ["Mstr", "Master "],
    ("Dsur", "Digital Surround "),
    ("Mtrx", "Matrix"),
    ("Dscrt", "Discrete "),
    ("Mch", "Multi-Channel "),
    (" Es ", " ES "),
]
# Response prefixes handled specially (no entry in MDAVR.CMDS_DEFS).
EXTRAS = ["SSINFAI"]
# Commands that require a space before the "?" query marker.
NEEDSPACE = ["PSDEL", "PSDYNVOL", "PSDRC"]
def cc_string(identifier: str) -> str:
    """Split a CamelCase identifier into space-separated words.

    Regex from https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python
    """
    pattern = r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"
    return " ".join(re.findall(pattern, identifier))
def only_int(val: str) -> str:
    """Return *val* with every character that is not an ASCII digit removed."""
    digits = set("0123456789")
    return "".join(ch for ch in val if ch in digits)
# Exception hierarchy: catch AvrError to handle any failure from this module.
class AvrError(Exception):
    """Base class for all errors returned from an AVR."""
    pass
class AvrTimeoutError(AvrError):
    """A request to the AVR has timed out."""
    pass
async def avr_factory(
    name: str, host: str, port: int = 23, timeout: float = 3.0
) -> Optional["MDAVR"]:
    """Connect to an AVR.

    :param name: The name of this device.
    :type name: str
    :param host: The device IP address or hostname.
    :type host: str
    :param port: The telnet port of the device (23 by default).
    :type port: int
    :param timeout: Seconds to wait for the connection to come up.
    :type timeout: float
    :returns: A device instance or None if the connection cannot be established.
    :rtype: MDAVR or None
    """
    try:
        # asyncio.open_connection has no timeout parameter of its own, so
        # bound the attempt with wait_for (the old code never used `timeout`).
        reader, writer = await asyncio.wait_for(
            asyncio.open_connection(host, port=port), timeout
        )
    except (OSError, asyncio.TimeoutError):
        # Unreachable host, refused connection or timeout: report failure.
        # (The old bare `except:` also hid programming errors in MDAVR().)
        return None
    return MDAVR(name, reader, writer, timeout)
def _on_off_from_bool(value: bool) -> str:
if value:
return "ON"
else:
return "OFF"
def _on_off_to_bool(value: str) -> bool:
return value == "ON"
class _CommandDef:
code: str
label: str
vals: Optional[Enum]
def __init__(self, label: str, vals: Any):
self.label = label
self.values = vals
class MDAVR:
    """Connection to a Marantz AVR over Telnet.
    Uses `connect` to create a connection to the AVR.
    """
    # Protocol command table: maps the wire prefix to a human readable label
    # and, when the command has a closed set of values, the matching Enum.
    CMDS_DEFS: Mapping[str, _CommandDef] = {
        "PW": _CommandDef("Power", Power),
        "ZM": _CommandDef("Main Zone", Power),
        "Z2": _CommandDef("Zone 2", Power),
        "Z3": _CommandDef("Zone 3", Power),
        "MU": _CommandDef("Muted", None),
        "Z2MU": _CommandDef("Z2 Muted", None),
        "Z3MU": _CommandDef("Z3 Muted", None),
        "MV": _CommandDef("Volume", None),
        "Z2MV": _CommandDef("Z2 Volume", None),
        "Z3MV": _CommandDef("Z3 Volume", None),
        "SI": _CommandDef("Source", InputSource),
        "Z2SI": _CommandDef("Z2 Source", InputSource),
        "Z3SI": _CommandDef("Z3 Source", InputSource),
        "MS": _CommandDef("Surround Mode", SurroundMode),
        "CV": _CommandDef("Channel Bias", ChannelBias),
        "PV": _CommandDef("Picture Mode", PictureMode),
        "ECO": _CommandDef("Eco Mode", EcoMode),
        "SSSOD": _CommandDef("Available Source", InputSource),
        "PSDEL": _CommandDef("Sound Delay", None),
        "PSDRC": _CommandDef("Dynamic Range Compression", DRCMode),
        "PSDYNVOL": _CommandDef("Dynamic Volume", DynamicMode),
        # SSANA ? analog inputs
        # SSHDM ? Mapping between source and HDMI connection
        # SSDIN ? digital inputs, COax OPtical
        # SSSPC ? Speakers' configuration
        # SSPAA ? Not sure. Active speakers config? Also returns SSSPC
        # SSQSNZMA ? Smart select.. what for?
    }
    # Telnet stream pair and connection timeout; assigned in __init__.
    _reader: asyncio.StreamReader
    _writer: asyncio.StreamWriter
    _timeout: float
    def __init__(
        self,
        name: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        timeout: float,
    ):
        """Wrap an established telnet stream pair to an AVR.

        Spawns the reader/writer tasks and queues an initial capability
        query plus a full status refresh. Use :func:`avr_factory` rather
        than constructing this directly.
        """
        self.name = name
        self._reader = reader
        self._writer = writer
        self._timeout = timeout
        self.status = {}
        self.maxvol = 98  # Good default ;)
        self.alive = True
        self.write_queue = asyncio.Queue()
        # Prime the status map with "-" placeholders for the short command
        # prefixes; the long "PS..." settings are added explicitly below.
        for x in self.CMDS_DEFS:
            if len(x) < 5:
                self.status[self.CMDS_DEFS[x].label] = "-"
        for x in ["PSDEL", "PSDRC", "PSDYNVOL"]:
            self.status[self.CMDS_DEFS[x].label] = "-"
        self.cvend = True  # True once a full channel-bias (CV) report ended
        self.notify = None  # optional callback registered via notifyme()
        self.mysources = []  # input sources the device reports as available
        # Start reading
        self.wtask = asyncio.get_event_loop().create_task(self._do_write())
        self.rtask = asyncio.get_event_loop().create_task(self._do_read())
        self._get_capabilities()
        self.refresh()
    def _get_capabilities(self):
        """
        Here we try to get the various capabilities of the device connected.
        """
        # Let's get the available Sources
        self.write_queue.put_nowait(("SSSOD", " ?"))
    def _get_current(self, cmd):
        # Return the cached status value for the given command's label.
        return self.status[self.CMDS_DEFS[cmd].label]
    def _get_list(self, cmd):
        # Return the human readable names of every value the command's
        # Enum accepts.
        return [cc_string(x.name) for x in list(self.CMDS_DEFS[cmd].values)]
# API Starts here
@property
def power(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("PW")
@property
def zmain(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("ZM")
@property
def z2(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("Z2")
@property
def z3(self) -> Optional[Power]:
"""Power state of the AVR."""
return self._get_current("Z3")
@property
def muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("MU")
@property
def z2_muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("Z2MU")
@property
def z3_muted(self) -> Optional[bool]:
"""Boolean if volume is currently muted."""
return self._get_current("Z3MU")
@property
def volume(self) -> Optional[float]:
"""Volume level of the AVR zone (00..max_volume)."""
return self._get_current("MV")
@property
def z2_volume(self) -> Optional[float]:
"""Volume level of the AVR zone (00..max_volume)."""
return self._get_current("Z2MV")
@property
def z2_volume(self) -> Optional[float]:
"""Volume level of the AVR zone (00..max_volume)."""
return self._get_current("Z2MV")
@property
def max_volume(self) -> Optional[float]:
"""Maximum volume level of the AVR zone."""
return self.maxvol
@property
def source(self) -> str:
"""Name of the current input source."""
return self._get_current("SI")
@property
def z2_source(self) -> str:
"""Name of the current input source."""
return self._get_current("Z2SI")
@property
def z2_source(self) -> str:
"""Name of the current input source."""
return self._get_current("Z3SI")
@property
def source_list(self) -> List[str]:
"""List of available input sources."""
if self.mysources:
return self.mysources
return self._get_list("SI")
@property
def sound_mode(self) -> str:
"""Name of the current sound mode."""
return self._get_current("MS")
@property
def sound_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("MS")
@property
def picture_mode(self) -> str:
"""Name of the current sound mode."""
return self._get_current("PV")
@property
def picture_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PV")
@property
def eco_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("ECO")
@property
def eco_mode_list(self) -> List[str]:
"""List of available exo modes."""
return self._get_list("ECO")
@property
def channels_bias(self) -> Mapping[str, float]:
return self._get_current("CV")
@property
def channels_bias_list(self) -> List[str]:
"""List of currently available."""
return [x for x in self._get_current("CV").keys()]
@property
def drc_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDRC")
@property
def drc_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PSDRC")
@property
def dynamic_volume_mode(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDYNVOL")
@property
def dynamic_volume_mode_list(self) -> List[str]:
"""List of available sound modes."""
return self._get_list("PSDYNVOL")
@property
def delay(self) -> str:
"""Current ECO mode."""
return self._get_current("PSDEL")
def refresh(self) -> None:
"""Refresh all properties from the AVR."""
for cmd_def in self.CMDS_DEFS:
if cmd_def in NEEDSPACE:
qs = " ?"
else:
qs = "?"
fut = self.write_queue.put_nowait((cmd_def, qs))
def turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("PW", "ON"))
def turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("PW", "STANDBY"))
def main_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("ZM", "ON"))
def main_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("ZM", "OFF"))
def z2_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("Z2", "ON"))
def z2_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("Z2", "OFF"))
def z3_turn_on(self) -> None:
"""Turn the AVR on."""
self.write_queue.put_nowait(("Z3", "ON"))
def z3_turn_off(self) -> None:
"""Turn the AVR off."""
self.write_queue.put_nowait(("Z3", "OFF"))
def mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self.write_queue.put_nowait(("MU", _on_off_from_bool(mute)))
def _zone_mute_volume(self, zone: str, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self.write_queue.put_nowait((zone, _on_off_from_bool(mute)))
def z2_mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self._zone_mute_volume("Z2MU", mute)
def z3_mute_volume(self, mute: bool) -> None:
"""Mute or unmute the volume.
Arguments:
mute -- True to mute, False to unmute.
"""
self._zone_mute_volume("Z3MU", mute)
def set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
if level > self.maxvol:
level = maxvol
if int(10 * level) % 10:
# Needs to be a nultiple of 5
level = int(5 * round(10 * level / 5))
else:
level = int(level)
self.write_queue.put_nowait(("MV", f"{level:02}"))
def volume_up(self) -> None:
"""Turn the volume level up one notch."""
self._zone_volume("MV", "UP")
def volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("MV", "DOWN")
def z2_set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
self._zone_set_volume("Z2", level)
def z3_set_volume(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
self._zone_set_volume("Z3", level)
def z2_volume_up(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z2", "UP")
def z3_volume_up(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z3", "UP")
def z2_volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z2", "DOWN")
def z3_volume_down(self) -> None:
"""Turn the volume level down one notch."""
self._zone_volume("Z3", "DOWN")
def set_channel_bias(self, chan: str, level: float) -> None:
"""Set the volume level.
Arguments:
chan -- channel to set
level -- A float value between -12.0 and +12.0
"""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] != level:
chan = chan.replace(" ", "")
level = level + 50 # 50 is 0dB
if level < 38:
level = 38
elif level > 62:
level = 62
if int(10 * level) % 10:
# Needs to be a nultiple of 5
level = int(5 * round(10 * level / 5))
else:
level = int(level)
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} {level:02}"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channel_bias_up(self, chan: str) -> None:
"""Turn the volume level up one notch."""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] == 12:
# We are at the limit. It won't respond
logging.debugf(f"Channel {chan} it at the upper limit.")
return
chan = chan.replace(" ", "")
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} UP"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channel_bias_down(self, chan: str) -> None:
"""Turn the volume level down one notch."""
if chan not in self.channels_bias:
logging.warning(f"Channel {chan} is not available right now.")
return
if self.channels_bias[chan] == -12:
# We are at the limit. It won't respond
logging.debugf(f"Channel {chan} it at the lowewr limit.")
return
chan = chan.replace(" ", "")
cmd = None
for x in self.CMDS_DEFS["CV"].values:
if x.name == chan:
cmd = x.value
break
if cmd:
self.write_queue.put_nowait(("CV", f"{cmd} DOWN"))
else:
logging.error(
f"Channel {chan} should exist. This should not have happened."
)
def channels_bias_reset(self):
self.write_queue.put_nowait(("CV", "ZRL"))
def select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("SI", source.value))
def z2_select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("Z2", source.value))
def z3_select_source(self, source: str) -> None:
"""Select the input source."""
try:
source = self.CMDS_DEFS["SI"].values[source.replace(" ", "")]
except:
logging.warning(f"Warning: {source} is not a valid source")
return
self.write_queue.put_nowait(("Z3", source.value))
def select_sound_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["MS"].values[mode.replace(" ", "")]
except:
logging.warning(f"Warning: {mode} is not a valid mode")
return
self.write_queue.put_nowait(("MS", mode.value))
def select_picture_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PV"].values[mode.replace(" ", "")]
except:
logging.warning(f"Warning: {mode} is not a valid mode")
return
self.write_queue.put_nowait(("PV", mode.value))
def select_eco_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["ECO"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid eco mode")
return
self.write_queue.put_nowait(("ECO", mode.value))
def set_delay(self, level: float) -> None:
"""Set the volume level.
Arguments:
level -- An integer value between 0 and `max_volume`.
"""
level = int(level)
if level < 0:
level = 0
if level > 999:
level = 999
self.write_queue.put_nowait(("PSDEL", f" {level:03}"))
def select_drc_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PSDRC"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid DRC mode")
return
self.write_queue.put_nowait(("PSDRC", " " + mode.value))
def select_dynamic_volume_mode(self, mode: str) -> None:
"""Select the sound mode."""
try:
mode = self.CMDS_DEFS["PSDYNVOL"].values[mode.replace(" ", "").title()]
except:
logging.warning(f"Warning: {mode} is not a valid Dynamic Volume mode")
return
self.write_queue.put_nowait(("PSDYNVOL", " " + mode.value))
    def notifyme(self, func: Callable) -> None:
        """Register a callback for when an event happens. The callable should have 2 parameters:
        the label of the changing value and the new value.
        """
        self.notify = func
    def close(self):
        # Tear down the connection: mark dead, close the stream and cancel
        # the reader/writer tasks.
        self.alive = False
        self._writer.close()
        self.rtask.cancel()
        self.wtask.cancel()
        logging.debug(f"Closed device {self.name}")
# API ends here
def _zone_volume(self, zone: str, uod: str) -> None:
"""Turn the volume level up one notch."""
self.write_queue.put_nowait((zone, uod))
def _zone_set_volume(self, zone: str, level: float) -> None:
"""Set the volume level.
Arguments:
zone -- The zone affected
level -- An integer value between 0 and `max_volume`.
"""
if level > self.maxvol:
level = maxvol
level = int(level)
self.write_queue.put_nowait((zone, f"{level:02}"))
async def _send_command(self, cmd: str, val: Any) -> asyncio.Future:
tosend = f"{cmd}{val}\r"
logging.debug(f"Sending {tosend}")
self._writer.write(tosend.encode())
await self._writer.drain()
logging.debug("Write drained")
def _process_response(self, response: str) -> Optional[str]:
matches = [cmd for cmd in self.CMDS_DEFS.keys() if response.startswith(cmd)] + [
cmd for cmd in EXTRAS if response.startswith(cmd)
]
if not matches:
return None
if len(matches) > 1:
matches.sort(key=len, reverse=True)
match = matches[0]
if getattr(self, "_parse_" + match, None):
getattr(self, "_parse_" + match)(response.strip()[len(match) :].strip())
else:
# A few special cases ... for now
if response.startswith("SSINFAISFSV"):
try:
sr = int(only_int(response.split(" ")[-1]))
if sr > 200:
sr = round(sr / 10, 1)
else:
sr = float(sr)
self.status["Sampling Rate"] = sr
except Exception as e:
if response.split(" ")[-1] == "NON":
elf.status["Sampling Rate"] = "-"
else:
logging.debug(f"Error with sampling rate: {e}")
else:
self._parse_many(match, response.strip()[len(match) :].strip())
logging.debug(f"Warning _parse_{match} is not defined.")
return match
    def _parse_many(self, cmd: str, resp: str) -> None:
        """Generic parser for commands with an Enum of values.

        Matches *resp* against the command's Enum values, updates the cached
        status with the human readable name and notifies on change.
        """
        for x in self.CMDS_DEFS[cmd].values:
            if resp == x.value:
                lbl = self.CMDS_DEFS[cmd].label
                if self.status[lbl] != cc_string(x.name):
                    self.status[lbl] = cc_string(x.name)
                    if self.notify:
                        self.notify(lbl, self.status[lbl])
    def _parse_MV(self, resp: str) -> None:
        """Parse a main zone volume report.

        Three-digit values encode half steps (505 -> 50.5). "MAX ..."
        responses update the maximum volume instead of the current one.
        """
        level = only_int(resp)
        if level:
            if len(level) > 2:
                level = int(level) / 10
            else:
                level = float(level)
            if resp.startswith("MAX"):
                self.maxvol = level
            else:
                lbl = self.CMDS_DEFS["MV"].label
                if self.status[lbl] != level:
                    self.status[lbl] = level
                    if self.notify:
                        self.notify(lbl, self.status[lbl])
def _parse_MU(self, resp: str) -> None:
nval = resp == "ON"
lbl = self.CMDS_DEFS["MU"].label
if self.status[lbl] != nval:
self.status[lbl] = nval
if self.notify:
self.notify(lbl, self.status[lbl])
def _parse_Z2MU(self, resp: str) -> None:
    """Track the zone-2 mute state reported by the AVR."""
    muted = resp == "ON"
    label = self.CMDS_DEFS["Z2MU"].label
    if self.status[label] == muted:
        return
    self.status[label] = muted
    if self.notify:
        self.notify(label, self.status[label])
def _parse_Z3MU(self, resp: str) -> None:
    """Track the zone-3 mute state reported by the AVR."""
    muted = resp == "ON"
    label = self.CMDS_DEFS["Z3MU"].label
    if self.status[label] == muted:
        return
    self.status[label] = muted
    if self.notify:
        self.notify(label, self.status[label])
def _parse_zone(self, zone: str, resp: str) -> None:
    """Parse a zone-prefixed report.

    Naturally, those idiots had to overload the zone prefix for
    power, volume and source... so we first try power, then a volume
    level, and finally fall back to interpreting it as a source name.
    """
    if resp in ["ON", "OFF"]:
        self._parse_many(zone, resp)
        return
    if resp.startswith("SMART"):
        # not handled
        return
    if resp.startswith("FAVORITE"):
        # not handled, learn to spell!
        return
    try:
        logging.debug(f"Checking level for {zone}")
        level = only_int(resp)
        # Three digits means half-dB resolution, e.g. "505" -> 50.5.
        if len(level) > 2:
            level = int(level) / 10
        else:
            level = float(level)
        lbl = self.CMDS_DEFS[zone + "MV"].label
        if self.status[lbl] != level:
            self.status[lbl] = level
            if self.notify:
                self.notify(lbl, self.status[lbl])
    except Exception:
        # BUGFIX: was a bare "except:" (would swallow even KeyboardInterrupt).
        # Not a volume level -- probably the source selection.
        try:
            self._parse_many(zone + "SI", resp)
        except Exception as e:
            logging.debug(f"Failed when parsing {zone}: {e}")
def _parse_Z2(self, resp: str) -> None:
    """Delegate zone-2 reports to the shared zone parser."""
    self._parse_zone("Z2", resp)
def _parse_Z3(self, resp: str) -> None:
    """Delegate zone-3 reports to the shared zone parser."""
    self._parse_zone("Z3", resp)
def _parse_CV(self, resp: str) -> None:
    """Handle a channel-volume report.

    Different here... Needs to be reset: channel volumes arrive as a
    burst of "SPKR LEVEL" lines terminated by "END", so the accumulated
    map is cleared at the start of each new burst.
    """
    if resp == "END":
        self.cvend = True
        if self.notify:
            lbl = self.CMDS_DEFS["CV"].label
            self.notify(lbl, self.status[lbl])
        return
    if self.cvend:
        # First line of a new burst: start from a clean slate.
        self.status[self.CMDS_DEFS["CV"].label] = {}
        self.cvend = False
    spkr, level = resp.split(" ")
    if level:
        if len(level) > 2:
            # Three digits means half-dB resolution, e.g. "505" -> 50.5.
            level = int(level) / 10
        else:
            level = float(level)
    level -= 50  # device reports levels offset by +50 dB
    # BUGFIX: previously "spkrname" could be unbound when no value matched,
    # and the resulting NameError was hidden by a bare "except:".
    spkrname = None
    for x in self.CMDS_DEFS["CV"].values:
        if x.value == spkr:
            spkrname = cc_string(x.name)
            break
    if spkrname is None:
        logging.debug(f"Unknown speaker code {spkr}")
    else:
        self.status[self.CMDS_DEFS["CV"].label][spkrname] = level
def _parse_SSSOD(self, resp: str) -> None:
    """Collect the input sources the receiver reports as usable."""
    # Note: the terminator really does arrive with a leading space.
    if resp == " END":
        self.mysources.sort()
        logging.debug(f"My source is now {self.mysources}")
        return
    source, flag = resp.split(" ")
    if flag != "USE":
        return
    for candidate in self.CMDS_DEFS["SSSOD"].values:
        if source == candidate.value:
            self.mysources.append(cc_string(candidate.name))
            break
def _parse_MS(self, resp: str) -> None:
    """Normalise a surround-mode report into a readable label.

    Different here... What we get is not what we send, so we transform
    the result through semi-clever string manipulation: title-case each
    word, then apply the SSTRANSFORM substitutions.
    """
    pretty = resp.replace("+", " ")
    pretty = " ".join(word.title() for word in pretty.split(" "))
    for old, new in SSTRANSFORM:
        pretty = pretty.replace(old, new)
    # Collapse leftover punctuation/whitespace runs into single spaces.
    pretty = re.sub(r"[_\W]+", " ", pretty)
    label = self.CMDS_DEFS["MS"].label
    if self.status[label] != pretty:
        self.status[label] = pretty
        if self.notify:
            self.notify(label, self.status[label])
def _parse_PSDEL(self, resp: str) -> None:
    """Track the audio-delay setting reported by the AVR."""
    digits = only_int(resp)
    if not digits:
        return
    delay = int(digits)
    label = self.CMDS_DEFS["PSDEL"].label
    if self.status[label] != delay:
        self.status[label] = delay
        if self.notify:
            self.notify(label, self.status[label])
async def _do_read(self):
    """ Keep on reading the info coming from the AVR"""
    # Frame the byte stream into CR-terminated messages, one per loop.
    while self.alive:
        data = b""
        while not data or data[-1] != ord("\r"):
            char = await self._reader.read(1)
            if char == b"":
                # EOF: the remote end closed the stream mid-message.
                break
            data += char
        if data == b"":
            # Gone
            self.close()
            return
        logging.debug(f"Received: {data}")
        try:
            # Parsers must never kill the read loop; log and keep going.
            match = self._process_response(data.decode().strip("\r"))
        except Exception as e:
            logging.debug(f"Problem processing response: {e}")
async def _do_write(self):
    """Keep on writing queued commands out to the AVR."""
    # Drain the write queue for as long as the connection is alive.
    while self.alive:
        cmd, param = await self.write_queue.get()
        if cmd:
            await self._send_command(cmd, param)
        self.write_queue.task_done()
| 1.9375 | 2 |
privacyscanner/scanmodules/chromedevtools/extractors/securityheaders.py | malexmave/privacyscanner | 0 | 12761307 | <reponame>malexmave/privacyscanner
from privacyscanner.scanmodules.chromedevtools.extractors.base import Extractor
class SecurityHeadersExtractor(Extractor):
    """Extract security-relevant HTTP response headers of the final URL.

    Stores a ``security_headers`` dict in the scan result: simple headers
    keep their raw value, while HSTS, CSP and X-XSS-Protection are parsed
    into structured dicts.
    """

    def extract_information(self):
        response = self.page.get_response_by_url(self.result['final_url'])
        if response is None:
            self.logger.error('Could not find response for final url')
            return
        headers = response['headers_lower']
        # Headers whose raw value is stored as-is (None when absent).
        header_names = ['Referrer-Policy', 'X-Content-Type-Options',
                        'X-Frame-Options', 'Expect-CT',
                        'Access-Control-Allow-Origin']
        security_headers = {}
        for header_name in header_names:
            security_headers[header_name] = self._get_header(headers, header_name)
        hsts_value = None
        if 'strict-transport-security' in headers:
            hsts_value = self._parse_hsts(headers['strict-transport-security'])
        security_headers['Strict-Transport-Security'] = hsts_value
        csp_value = None
        if 'content-security-policy' in headers:
            csp_value = self._parse_csp(headers['content-security-policy'])
        security_headers['Content-Security-Policy'] = csp_value
        xss_protection = None
        if 'x-xss-protection' in headers:
            xss_protection = self._parse_xss_protection(headers['x-xss-protection'])
        security_headers['X-XSS-Protection'] = xss_protection
        self.result['security_headers'] = security_headers

    @staticmethod
    def _parse_csp(header_value):
        """Parse Content-Security-Policy into {directive: [values]}.

        The raw header is preserved under the 'header_value' key.
        """
        csp = {}
        parts = [part.strip() for part in header_value.split(';')]
        for part in parts:
            if not part:
                continue
            values = part.split()
            key = values[0]
            values = values[1:]
            csp[key.lower()] = values
        csp['header_value'] = header_value
        return csp

    @staticmethod
    def _parse_hsts(header_value):
        """Parse Strict-Transport-Security into its directives.

        ``max-age`` stays the raw string when it is not a valid integer.
        """
        parts = [part.strip() for part in header_value.split(';')]
        max_age = None
        for part in parts:
            if part.startswith('max-age='):
                max_age = part.split('=', 1)[1]
                try:
                    max_age = int(max_age)
                except ValueError:
                    pass
                break
        return {
            'header_value': header_value,
            'includeSubDomains': 'includeSubDomains' in parts,
            'preload': 'preload' in parts,
            'max-age': max_age
        }

    @staticmethod
    def _parse_xss_protection(header_value):
        """Parse X-XSS-Protection ("0", "1" or "1; mode=block")."""
        mode = None
        is_active = None
        if ';' in header_value:
            is_active, mode_str = header_value.split(';', 1)
            is_active = is_active.strip() == '1'
            if mode_str.strip().startswith('mode='):
                mode = mode_str.split('=', 1)[1].strip()
        elif header_value.strip() in ('0', '1'):
            # BUGFIX: a bare "0"/"1" (no mode directive) is a valid header
            # value; previously this case left is_active as None.
            is_active = header_value.strip() == '1'
        return {
            'header_value': header_value,
            'is_active': is_active,
            'mode': mode
        }

    @staticmethod
    def _get_header(headers, header_name):
        """Return the raw header value, or None when the header is absent."""
        header_name = header_name.lower()
        if header_name in headers:
            value = headers[header_name]
            # Chrome will separate multiple headers with a newline,
            # however, RFC 2616 says that they should be interpreted
            # with a comma in between. See RFC2616 Sect. 4.2:
            # https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
            value = value.replace('\n', ',')
            return value
        return None
tests/providers/test_base_stats_provider.py | jonathonmellor/mimesis-stats | 4 | 12761308 | <reponame>jonathonmellor/mimesis-stats<filename>tests/providers/test_base_stats_provider.py
import pytest
from mimesis_stats.providers.base_stats import BaseStatsDataProvider
@pytest.mark.parametrize(
    "value, proportion, null_value, return_value",
    [(1, 0, None, 1), ("A", 1, None, None), ({"A": 1}, 0, None, {"A": 1}), (1, 1, "NULL", "NULL")],
)
def test_base_stats_replace(value, proportion, null_value, return_value):
    """Test does not require seed setting for deterministic results"""
    # proportion=0 never replaces; proportion=1 always yields the replacement.
    generator = BaseStatsDataProvider()
    assert generator._replace(value=value, proportion=proportion, replacement=null_value) == return_value
@pytest.mark.parametrize(
    "values, proportions, null_values, return_value",
    [
        ((1, 2, 3), [0, 0, 0], [None, None, None], (1, 2, 3)),
        ((1, 2, 3), [0, 1, 0], [None, None, None], (1, None, 3)),
    ],
)
def test_base_stats_replace_multiple(values, proportions, null_values, return_value):
    """Test does not require seed setting for deterministic results"""
    # Element-wise: only positions with proportion=1 are replaced.
    generator = BaseStatsDataProvider()
    assert generator._replace_multiple(values=values, proportions=proportions, replacements=null_values) == return_value
db/models/company.py | matchd-ch/matchd-backend | 1 | 12761309 | <filename>db/models/company.py
from django.contrib.contenttypes.models import ContentType
from django.core.validators import RegexValidator
from django.db import models
from django.conf import settings
from django.db.models import Q
from django.db.models.signals import pre_delete
from wagtail.search import index
from .attachment import Attachment
from .profile_type import ProfileType
from .profile_state import ProfileState
class Company(models.Model, index.Indexed):
    """Profile of a company or a university, indexed for Wagtail search.

    The ``type`` field (ProfileType) decides which field group below is
    relevant; both groups live on the same table.
    """
    # fields for company / university
    type = models.CharField(choices=ProfileType.choices, max_length=255, blank=True)
    state = models.CharField(choices=ProfileState.choices,
                             max_length=255,
                             blank=False,
                             default=ProfileState.INCOMPLETE)
    # Current step of the profile-completion wizard (1-based).
    profile_step = models.IntegerField(default=1)
    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=255, blank=False)
    zip = models.CharField(max_length=10, blank=False)
    city = models.CharField(max_length=255, blank=False)
    street = models.CharField(max_length=255, blank=True)
    phone = models.CharField(max_length=12,
                             blank=True,
                             validators=[RegexValidator(regex=settings.PHONE_REGEX)])
    website = models.URLField(max_length=2048, blank=True)
    branches = models.ManyToManyField('db.Branch', related_name='companies')
    description = models.TextField(max_length=1000, blank=True)

    # fields for company only
    soft_skills = models.ManyToManyField('db.SoftSkill', related_name='companies')
    uid = models.CharField(max_length=255,
                           blank=False,
                           validators=[RegexValidator(regex=settings.UID_REGEX)])
    services = models.TextField(blank=True)
    member_it_st_gallen = models.BooleanField(blank=True, default=False)
    benefits = models.ManyToManyField('db.Benefit', related_name='companies')
    cultural_fits = models.ManyToManyField('db.CulturalFit', related_name='companies')

    # fields for university only
    top_level_organisation_description = models.TextField(max_length=1000, blank=True)
    top_level_organisation_website = models.URLField(max_length=2048, blank=True)
    link_education = models.URLField(max_length=2048, blank=True, null=True)
    link_projects = models.URLField(max_length=2048, blank=True, null=True)
    link_thesis = models.URLField(max_length=2048, blank=True, null=True)

    def get_profile_content_type(self):
        """Return this model's ContentType (used for generic relations)."""
        return ContentType.objects.get(app_label='db', model='company')

    def get_profile_id(self):
        """Return the primary key acting as the generic profile id."""
        return self.id

    @classmethod
    def get_indexed_objects(cls):
        """Restrict search indexing to publicly visible profiles.

        Only PUBLIC and ANONYMOUS profiles are indexed; the related fields
        used by ``search_fields`` below are prefetched to avoid N+1 queries.
        """
        query = Q(state=ProfileState.PUBLIC)
        query |= Q(state=ProfileState.ANONYMOUS)
        return cls.objects.filter(query).prefetch_related('branches', 'cultural_fits',
                                                          'soft_skills')

    # Wagtail search configuration: these related ids are filterable facets.
    search_fields = [
        index.RelatedFields('branches', [
            index.FilterField('id'),
        ]),
        index.RelatedFields('cultural_fits', [
            index.FilterField('id'),
        ]),
        index.RelatedFields('soft_skills', [
            index.FilterField('id'),
        ])
    ]
class CompanySignalHandler:
    """Cascades cleanup of related objects when a Company row is deleted."""

    @staticmethod
    def pre_delete(sender, instance, **kwargs):
        # Disconnect first so the cascaded deletes below cannot re-trigger
        # this handler recursively.
        # NOTE(review): the handler is never reconnected afterwards, so a
        # second Company deletion in the same process would skip this
        # cleanup -- confirm whether that is intentional.
        pre_delete.disconnect(CompanySignalHandler.pre_delete,
                              Company,
                              dispatch_uid='db.models.CompanySignalHandler.pre_delete')
        # employees
        for user in instance.users.all():
            user.delete()
        # attachments of project postings
        project_posting_type = ContentType.objects.get(app_label='db', model='projectposting')
        for project_posting in instance.project_postings.all():
            attachments = Attachment.objects.filter(content_type=project_posting_type,
                                                    object_id=project_posting.id)
            for attachment in attachments:
                # Delete the underlying file/image object, then the row.
                attachment.attachment_object.delete()
            attachments.delete()
        # avatars / moods
        company_type = ContentType.objects.get(app_label='db', model='company')
        attachments = Attachment.objects.filter(content_type=company_type, object_id=instance.id)
        for attachment in attachments:
            attachment.attachment_object.delete()
        attachments.delete()
# Register the cleanup handler. The connect call was previously duplicated;
# with the same dispatch_uid the second call was a guaranteed no-op
# (Django deduplicates receivers by dispatch_uid), so it is dropped.
pre_delete.connect(CompanySignalHandler.pre_delete,
                   Company,
                   dispatch_uid='db.models.CompanySignalHandler.pre_delete')
| 1.921875 | 2 |
conformer/optim/lr_scheduler/transformer_lr_scheduler.py | phanxuanphucnd/conformer | 5 | 12761310 | # -*- coding: utf-8 -*-
import math
from arizona_asr.optim.lr_scheduler.lr_scheduler import LearningRateScheduler
class TransformerLRScheduler(LearningRateScheduler):
""" Transformer Learning Rate Scheduler proposed in "Attention Is All You Need" """
def __init__(self, optimizer, peak_lr, final_lr, final_lr_scale, warmup_steps, decay_steps):
assert isinstance(warmup_steps, int), "warmup_steps should be inteager type"
assert isinstance(decay_steps, int), "total_steps should be inteager type"
super(TransformerLRScheduler, self).__init__(optimizer, 0.0)
self.final_lr = final_lr
self.peak_lr = peak_lr
self.warmup_steps = warmup_steps
self.decay_steps = decay_steps
self.warmup_rate = self.peak_lr / self.warmup_steps
self.decay_factor = -math.log(final_lr_scale) / self.decay_steps
self.lr = self.init_lr
self.update_step = 0
def _decide_stage(self):
if self.update_step < self.warmup_steps:
return 0, self.update_step
if self.warmup_steps <= self.update_step < self.warmup_steps + self.decay_steps:
return 1, self.update_step - self.warmup_steps
return 2, None
def step(self):
self.update_step += 1
stage, steps_in_stage = self._decide_stage()
if stage == 0:
self.lr = self.update_step * self.warmup_rate
elif stage == 1:
self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
elif stage == 2:
self.lr = self.final_lr
else:
raise ValueError("Undefined stage")
self.set_lr(self.optimizer, self.lr)
return self.lr | 2.796875 | 3 |
payloads/promethea.py | k3170makan/PyMLProjects | 156 | 12761311 | #!/usr/bin/python
import numpy
from random import random
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sys import argv
from sys import stdout
from sys import exit
import model
from model import PasswordLSTM
"""
Promethea - a mysical female half-god who walks between the real and the immateira
(the realm of the idealistic real) granting man kind access to this magical realm that makes anything possible.
Promethea is meant to be a simple front end to making use of the LSTM stuff to plugin into other tools
like Burp, ZapProxy, Terminal etc all you do is call this script give it a payload and it returns the autocomplete
according to the way you trained it and the weight file you give it.
class Promethea:
def __init__(self,payload_filename, - name of file with the payloads used to train
weights_filename, - name of file with trained up weights in
payload, - stirng of charaters for the seed of predicition
nchars - number of characters to predict):
Fuzzing with Promethea:
1 - payload "autocomplete" mode (here's some input that is well formed, what do you think would be a good
way to complete this IF IT WERE a payload actually?)
2 - blind payload generation (just spit out what you know to spit out)
"""
class Promethea:
    """Generate payload completions from a trained character-level LSTM.

    Note: this module is Python 2 code (see the print statements in the
    __main__ block of the file).
    """

    def __init__(self,payload_filename,weights_filename,payload,nchars):
        # payload_filename: training corpus, needed to rebuild the vocabulary
        # weights_filename: trained Keras weights to load
        # payload: seed string for prediction
        # nchars: number of characters to generate
        self.payload_filename = payload_filename
        self.weights_filename = weights_filename
        self.prep_data(self.payload_filename,payload)
        self.init_payload = self.payload
        self.lstm = PasswordLSTM(self.X,self.y)
        self.lstm.load_weights(weights_filename)
        self.predict_length = nchars

    """
    Returns the next characters in the sequence prediction.
    Args:
        current_sequence (char) - sequence to predict from
    Returns:
        (char) - next character in sequence
    """
    def predict(self):
        return self.get_next(self.init_payload)

    def get_next(self,seed):
        # Slide a window over the seed: predict one character, append its
        # index, drop the oldest index, repeat predict_length times.
        outstring = ""
        for i in range(self.predict_length):
            x = numpy.reshape(seed,(1,len(seed),1))
            # Normalise indices into [0, 1] as during training.
            x = x / float(self.n_vocab)
            prediction = self.lstm.predict(x,verbose=0)
            index = numpy.argmax(prediction)
            result = self.int_to_char[index]
            outstring = outstring + result
            seed.append(index)
            seed = seed[1:len(seed)]
        return outstring

    """
    prep_data(data_filename,
            payload)
    Prepares the data to feed to the network for prediction.
    The Keras Sequential model needs a presentation of the vocab we taught it to generate from;
    essentially it only spits out character positions in a table of all possible characters - so if you want
    her to speak payloads you need to give her the list of chars she was trained on.
    Args:
        input_file (string) - list of payloads Promethea was trained on (we might move over to a simpler
                              vocab reload mechanism perhaps, since this is annoying)
    Returns:
        (x <list>) - x, a hot encoding of the vocabulary holding initial character sequences
    """
    def prep_data(self,data_filename,payload):
        seq_length = model.SEQ_LEN #need to make this SEQ_LEN an LSTM attribute rather than model level one
        raw_text = open(data_filename).read()
        # Vocabulary and the two index<->char lookup tables.
        self.chars = sorted(list(set(raw_text)))
        self.n_chars = len(raw_text)
        self.n_vocab = len(self.chars)
        self.int_to_char = dict((i,c) for i,c in enumerate(self.chars))
        self.char_to_int = dict((c,i) for i,c in enumerate(self.chars))
        # Encode the seed payload as a list of vocabulary indices.
        self.payload = [self.char_to_int[char] for char in payload]
        dataX = []
        dataY = []
        # Build (window, next-char) training pairs, as at training time.
        for i in range(self.n_chars - seq_length):
            seq_in = raw_text[i:i + seq_length]
            seq_out = raw_text[i + seq_length]
            dataX.append([self.char_to_int[char] for char in seq_in])
            dataY.append(self.char_to_int[seq_out])
        self.n_patterns = len(dataX)
        X = numpy.reshape(dataX,(self.n_patterns,seq_length,1))
        self.X = X / float(self.n_vocab)
        self.y = np_utils.to_categorical(dataY)
if __name__=="__main__":
seq_length = model.SEQ_LEN
#ill modularize this eventually
if len(argv) != 5:
print "Usage: %s [payload] [nchars] [data file] [weights filename]" % (argv[0])
print "Example: %s 'javascript' 100 awesome_polyglots.txt weights-for-generating-xss-payloads.txt" % (argv[0])
print "Example: %s 'body onload=' 100 more_polyglots.txt weights-for-generating-phpxss.txt" % (argv[0])
exit(1)
payload = argv[1]
print "[*] Seed: '%s'\n" % payload
nchars = int(argv[2])
data_filename = argv[3]
#generate using LSTM network
weights_filename = argv[4]
promethea = Promethea(data_filename,weights_filename,payload,nchars)
print promethea.predict()
| 2.734375 | 3 |
sdk/python/pulumi_aws_native/dynamodb/global_table.py | AaronFriel/pulumi-aws-native | 29 | 12761312 | <filename>sdk/python/pulumi_aws_native/dynamodb/global_table.py<gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['GlobalTableArgs', 'GlobalTable']
@pulumi.input_type
class GlobalTableArgs:
    # NOTE: generated by the Pulumi SDK Generator -- do not edit by hand.
    # Mirrors the AWS::DynamoDB::GlobalTable CloudFormation input schema;
    # attribute_definitions, key_schema and replicas are required.
    def __init__(__self__, *,
                 attribute_definitions: pulumi.Input[Sequence[pulumi.Input['GlobalTableAttributeDefinitionArgs']]],
                 key_schema: pulumi.Input[Sequence[pulumi.Input['GlobalTableKeySchemaArgs']]],
                 replicas: pulumi.Input[Sequence[pulumi.Input['GlobalTableReplicaSpecificationArgs']]],
                 billing_mode: Optional[pulumi.Input[str]] = None,
                 global_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableGlobalSecondaryIndexArgs']]]] = None,
                 local_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableLocalSecondaryIndexArgs']]]] = None,
                 s_se_specification: Optional[pulumi.Input['GlobalTableSSESpecificationArgs']] = None,
                 stream_specification: Optional[pulumi.Input['GlobalTableStreamSpecificationArgs']] = None,
                 table_name: Optional[pulumi.Input[str]] = None,
                 time_to_live_specification: Optional[pulumi.Input['GlobalTableTimeToLiveSpecificationArgs']] = None,
                 write_provisioned_throughput_settings: Optional[pulumi.Input['GlobalTableWriteProvisionedThroughputSettingsArgs']] = None):
        """
        The set of arguments for constructing a GlobalTable resource.
        """
        pulumi.set(__self__, "attribute_definitions", attribute_definitions)
        pulumi.set(__self__, "key_schema", key_schema)
        pulumi.set(__self__, "replicas", replicas)
        if billing_mode is not None:
            pulumi.set(__self__, "billing_mode", billing_mode)
        if global_secondary_indexes is not None:
            pulumi.set(__self__, "global_secondary_indexes", global_secondary_indexes)
        if local_secondary_indexes is not None:
            pulumi.set(__self__, "local_secondary_indexes", local_secondary_indexes)
        if s_se_specification is not None:
            pulumi.set(__self__, "s_se_specification", s_se_specification)
        if stream_specification is not None:
            pulumi.set(__self__, "stream_specification", stream_specification)
        if table_name is not None:
            pulumi.set(__self__, "table_name", table_name)
        if time_to_live_specification is not None:
            pulumi.set(__self__, "time_to_live_specification", time_to_live_specification)
        if write_provisioned_throughput_settings is not None:
            pulumi.set(__self__, "write_provisioned_throughput_settings", write_provisioned_throughput_settings)

    # Boilerplate getter/setter pairs follow; each simply proxies the value
    # stored via pulumi.set above.
    @property
    @pulumi.getter(name="attributeDefinitions")
    def attribute_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['GlobalTableAttributeDefinitionArgs']]]:
        return pulumi.get(self, "attribute_definitions")

    @attribute_definitions.setter
    def attribute_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['GlobalTableAttributeDefinitionArgs']]]):
        pulumi.set(self, "attribute_definitions", value)

    @property
    @pulumi.getter(name="keySchema")
    def key_schema(self) -> pulumi.Input[Sequence[pulumi.Input['GlobalTableKeySchemaArgs']]]:
        return pulumi.get(self, "key_schema")

    @key_schema.setter
    def key_schema(self, value: pulumi.Input[Sequence[pulumi.Input['GlobalTableKeySchemaArgs']]]):
        pulumi.set(self, "key_schema", value)

    @property
    @pulumi.getter
    def replicas(self) -> pulumi.Input[Sequence[pulumi.Input['GlobalTableReplicaSpecificationArgs']]]:
        return pulumi.get(self, "replicas")

    @replicas.setter
    def replicas(self, value: pulumi.Input[Sequence[pulumi.Input['GlobalTableReplicaSpecificationArgs']]]):
        pulumi.set(self, "replicas", value)

    @property
    @pulumi.getter(name="billingMode")
    def billing_mode(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "billing_mode")

    @billing_mode.setter
    def billing_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "billing_mode", value)

    @property
    @pulumi.getter(name="globalSecondaryIndexes")
    def global_secondary_indexes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableGlobalSecondaryIndexArgs']]]]:
        return pulumi.get(self, "global_secondary_indexes")

    @global_secondary_indexes.setter
    def global_secondary_indexes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableGlobalSecondaryIndexArgs']]]]):
        pulumi.set(self, "global_secondary_indexes", value)

    @property
    @pulumi.getter(name="localSecondaryIndexes")
    def local_secondary_indexes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableLocalSecondaryIndexArgs']]]]:
        return pulumi.get(self, "local_secondary_indexes")

    @local_secondary_indexes.setter
    def local_secondary_indexes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalTableLocalSecondaryIndexArgs']]]]):
        pulumi.set(self, "local_secondary_indexes", value)

    @property
    @pulumi.getter(name="sSESpecification")
    def s_se_specification(self) -> Optional[pulumi.Input['GlobalTableSSESpecificationArgs']]:
        return pulumi.get(self, "s_se_specification")

    @s_se_specification.setter
    def s_se_specification(self, value: Optional[pulumi.Input['GlobalTableSSESpecificationArgs']]):
        pulumi.set(self, "s_se_specification", value)

    @property
    @pulumi.getter(name="streamSpecification")
    def stream_specification(self) -> Optional[pulumi.Input['GlobalTableStreamSpecificationArgs']]:
        return pulumi.get(self, "stream_specification")

    @stream_specification.setter
    def stream_specification(self, value: Optional[pulumi.Input['GlobalTableStreamSpecificationArgs']]):
        pulumi.set(self, "stream_specification", value)

    @property
    @pulumi.getter(name="tableName")
    def table_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "table_name")

    @table_name.setter
    def table_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "table_name", value)

    @property
    @pulumi.getter(name="timeToLiveSpecification")
    def time_to_live_specification(self) -> Optional[pulumi.Input['GlobalTableTimeToLiveSpecificationArgs']]:
        return pulumi.get(self, "time_to_live_specification")

    @time_to_live_specification.setter
    def time_to_live_specification(self, value: Optional[pulumi.Input['GlobalTableTimeToLiveSpecificationArgs']]):
        pulumi.set(self, "time_to_live_specification", value)

    @property
    @pulumi.getter(name="writeProvisionedThroughputSettings")
    def write_provisioned_throughput_settings(self) -> Optional[pulumi.Input['GlobalTableWriteProvisionedThroughputSettingsArgs']]:
        return pulumi.get(self, "write_provisioned_throughput_settings")

    @write_provisioned_throughput_settings.setter
    def write_provisioned_throughput_settings(self, value: Optional[pulumi.Input['GlobalTableWriteProvisionedThroughputSettingsArgs']]):
        pulumi.set(self, "write_provisioned_throughput_settings", value)
class GlobalTable(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableAttributeDefinitionArgs']]]]] = None,
billing_mode: Optional[pulumi.Input[str]] = None,
global_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableGlobalSecondaryIndexArgs']]]]] = None,
key_schema: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableKeySchemaArgs']]]]] = None,
local_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableLocalSecondaryIndexArgs']]]]] = None,
replicas: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableReplicaSpecificationArgs']]]]] = None,
s_se_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableSSESpecificationArgs']]] = None,
stream_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableStreamSpecificationArgs']]] = None,
table_name: Optional[pulumi.Input[str]] = None,
time_to_live_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableTimeToLiveSpecificationArgs']]] = None,
write_provisioned_throughput_settings: Optional[pulumi.Input[pulumi.InputType['GlobalTableWriteProvisionedThroughputSettingsArgs']]] = None,
__props__=None):
"""
Version: None. Resource Type definition for AWS::DynamoDB::GlobalTable
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: GlobalTableArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Version: None. Resource Type definition for AWS::DynamoDB::GlobalTable
:param str resource_name: The name of the resource.
:param GlobalTableArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(GlobalTableArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attribute_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableAttributeDefinitionArgs']]]]] = None,
billing_mode: Optional[pulumi.Input[str]] = None,
global_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableGlobalSecondaryIndexArgs']]]]] = None,
key_schema: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableKeySchemaArgs']]]]] = None,
local_secondary_indexes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableLocalSecondaryIndexArgs']]]]] = None,
replicas: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GlobalTableReplicaSpecificationArgs']]]]] = None,
s_se_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableSSESpecificationArgs']]] = None,
stream_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableStreamSpecificationArgs']]] = None,
table_name: Optional[pulumi.Input[str]] = None,
time_to_live_specification: Optional[pulumi.Input[pulumi.InputType['GlobalTableTimeToLiveSpecificationArgs']]] = None,
write_provisioned_throughput_settings: Optional[pulumi.Input[pulumi.InputType['GlobalTableWriteProvisionedThroughputSettingsArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = GlobalTableArgs.__new__(GlobalTableArgs)
if attribute_definitions is None and not opts.urn:
raise TypeError("Missing required property 'attribute_definitions'")
__props__.__dict__["attribute_definitions"] = attribute_definitions
__props__.__dict__["billing_mode"] = billing_mode
__props__.__dict__["global_secondary_indexes"] = global_secondary_indexes
if key_schema is None and not opts.urn:
raise TypeError("Missing required property 'key_schema'")
__props__.__dict__["key_schema"] = key_schema
__props__.__dict__["local_secondary_indexes"] = local_secondary_indexes
if replicas is None and not opts.urn:
raise TypeError("Missing required property 'replicas'")
__props__.__dict__["replicas"] = replicas
__props__.__dict__["s_se_specification"] = s_se_specification
__props__.__dict__["stream_specification"] = stream_specification
__props__.__dict__["table_name"] = table_name
__props__.__dict__["time_to_live_specification"] = time_to_live_specification
__props__.__dict__["write_provisioned_throughput_settings"] = write_provisioned_throughput_settings
__props__.__dict__["arn"] = None
__props__.__dict__["stream_arn"] = None
__props__.__dict__["table_id"] = None
super(GlobalTable, __self__).__init__(
'aws-native:dynamodb:GlobalTable',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'GlobalTable':
"""
Get an existing GlobalTable resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = GlobalTableArgs.__new__(GlobalTableArgs)
__props__.__dict__["arn"] = None
__props__.__dict__["attribute_definitions"] = None
__props__.__dict__["billing_mode"] = None
__props__.__dict__["global_secondary_indexes"] = None
__props__.__dict__["key_schema"] = None
__props__.__dict__["local_secondary_indexes"] = None
__props__.__dict__["replicas"] = None
__props__.__dict__["s_se_specification"] = None
__props__.__dict__["stream_arn"] = None
__props__.__dict__["stream_specification"] = None
__props__.__dict__["table_id"] = None
__props__.__dict__["table_name"] = None
__props__.__dict__["time_to_live_specification"] = None
__props__.__dict__["write_provisioned_throughput_settings"] = None
return GlobalTable(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="attributeDefinitions")
def attribute_definitions(self) -> pulumi.Output[Sequence['outputs.GlobalTableAttributeDefinition']]:
return pulumi.get(self, "attribute_definitions")
@property
@pulumi.getter(name="billingMode")
def billing_mode(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "billing_mode")
@property
@pulumi.getter(name="globalSecondaryIndexes")
def global_secondary_indexes(self) -> pulumi.Output[Optional[Sequence['outputs.GlobalTableGlobalSecondaryIndex']]]:
return pulumi.get(self, "global_secondary_indexes")
@property
@pulumi.getter(name="keySchema")
def key_schema(self) -> pulumi.Output[Sequence['outputs.GlobalTableKeySchema']]:
    """The ``key_schema`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "key_schema")
@property
@pulumi.getter(name="localSecondaryIndexes")
def local_secondary_indexes(self) -> pulumi.Output[Optional[Sequence['outputs.GlobalTableLocalSecondaryIndex']]]:
    """The optional ``local_secondary_indexes`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "local_secondary_indexes")
@property
@pulumi.getter
def replicas(self) -> pulumi.Output[Sequence['outputs.GlobalTableReplicaSpecification']]:
    """The ``replicas`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "replicas")
@property
@pulumi.getter(name="sSESpecification")
def s_se_specification(self) -> pulumi.Output[Optional['outputs.GlobalTableSSESpecification']]:
    """The optional ``s_se_specification`` (SSE) output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "s_se_specification")
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> pulumi.Output[str]:
    """The ``stream_arn`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "stream_arn")
@property
@pulumi.getter(name="streamSpecification")
def stream_specification(self) -> pulumi.Output[Optional['outputs.GlobalTableStreamSpecification']]:
    """The optional ``stream_specification`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "stream_specification")
@property
@pulumi.getter(name="tableId")
def table_id(self) -> pulumi.Output[str]:
    """The ``table_id`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "table_id")
@property
@pulumi.getter(name="tableName")
def table_name(self) -> pulumi.Output[Optional[str]]:
    """The optional ``table_name`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "table_name")
@property
@pulumi.getter(name="timeToLiveSpecification")
def time_to_live_specification(self) -> pulumi.Output[Optional['outputs.GlobalTableTimeToLiveSpecification']]:
    """The optional ``time_to_live_specification`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "time_to_live_specification")
@property
@pulumi.getter(name="writeProvisionedThroughputSettings")
def write_provisioned_throughput_settings(self) -> pulumi.Output[Optional['outputs.GlobalTableWriteProvisionedThroughputSettings']]:
    """The optional ``write_provisioned_throughput_settings`` output property, resolved via ``pulumi.get``."""
    return pulumi.get(self, "write_provisioned_throughput_settings")
| 1.507813 | 2 |
Testing/gg.py | DEADSEC-SECURITY/CODEX | 30 | 12761313 | <filename>Testing/gg.py
# Strip the wrapping single quotes from a quoted filesystem path literal
# and print the bare path.
raw = "'/home/deadsec/Desktop/CODEX/Data/HandShakes/PWF1717189-01.cap'"
x = raw.strip("'")
print(x)
| 1.421875 | 1 |
nicos_mlz/frm2/commands/imaging.py | ebadkamil/nicos | 12 | 12761314 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from os.path import relpath
from nicos import session
from nicos.commands import helparglist, usercommand
from nicos.commands.device import maw
from nicos.commands.imaging import grtomo, tomo
from nicos.commands.measure import count
from nicos.devices.datasinks.image import ImageSink
__all__ = ['tomo', 'openbeamimage', 'darkimage', 'grtomo']
def changeImgSinkSubdir(newsubdir):
    """Point the ``subdir`` parameter of every ImageSink in the session at *newsubdir*."""
    for sink in session.datasinks:
        if isinstance(sink, ImageSink):
            sink._setROParam('subdir', newsubdir)
@usercommand
@helparglist('shutter, [images], [detectors], [presets]')
# pylint: disable=keyword-arg-before-vararg
def openbeamimage(shutter=None, nimages=1, *detlist, **preset):
    """Acquire one or more open beam images.

    *shutter* is the shutter device to open for the exposure.  If the first
    positional argument is an integer, it is taken as *nimages* and no
    shutter device is used.  Remaining *detlist*/*preset* arguments are
    forwarded to ``count()``.  While counting, images are written below the
    experiment's ``openbeamdir`` and, if supported, the experiment's image
    type is set to 'openbeam'; both are restored afterwards.
    """
    if isinstance(shutter, int):
        # Called as openbeamimage(n): the int is the image count, no shutter.
        nimages, shutter = shutter, None
    exp = session.experiment
    det = exp.detectors[0] if exp.detectors else None
    limadev = det._attached_images[0] if det and det._attached_images else None
    # TODO: better ideas for shutter control
    if shutter:
        # Shutter was given, so open it
        maw(shutter, 'open')
    elif limadev and getattr(limadev, '_shutter', None):
        # No shutter; try the lima way
        oldmode = limadev.shuttermode
        limadev.shuttermode = 'auto'
    try:
        if hasattr(exp, 'curimgtype'):
            exp.curimgtype = 'openbeam'
        changeImgSinkSubdir(relpath(exp.openbeamdir, exp.datapath))
        return [count(*detlist, **preset) for _ in range(nimages)]
    finally:
        # Restore sink subdir, image type and shutter state even on error.
        changeImgSinkSubdir('')
        if hasattr(exp, 'curimgtype'):
            exp.curimgtype = 'standard'
        if shutter:
            maw(shutter, 'closed')
        elif limadev and getattr(limadev, '_shutter', None):
            limadev.shuttermode = oldmode
@usercommand
@helparglist('shutter, [nimages], [detectors], [presets]')
# pylint: disable=keyword-arg-before-vararg
def darkimage(shutter=None, nimages=1, *detlist, **preset):
    """Acquire one or more dark images.

    Mirror image of ``openbeamimage``: the shutter is kept *closed* during
    the exposure (or the lima shutter mode is forced to 'always_closed'),
    images go below the experiment's ``darkimagedir`` and the experiment
    image type is set to 'dark' while counting; everything is restored
    afterwards.
    """
    if isinstance(shutter, int):
        # Called as darkimage(n): the int is the image count, no shutter.
        nimages, shutter = shutter, None
    exp = session.experiment
    det = exp.detectors[0] if exp.detectors else None
    limadev = det._attached_images[0] if det and det._attached_images else None
    # TODO: better ideas for shutter control
    if shutter:
        # Shutter was given, so open it
        maw(shutter, 'closed')
    elif limadev and getattr(limadev, '_shutter', None):
        # No shutter; try the lima way
        oldmode = limadev.shuttermode
        limadev.shuttermode = 'always_closed'
    try:
        if hasattr(exp, 'curimgtype'):
            exp.curimgtype = 'dark'
        changeImgSinkSubdir(relpath(exp.darkimagedir, exp.datapath))
        return [count(*detlist, **preset) for _ in range(nimages)]
    finally:
        # Restore sink subdir, image type and shutter state even on error.
        changeImgSinkSubdir('')
        if hasattr(exp, 'curimgtype'):
            exp.curimgtype = 'standard'
        if shutter:
            maw(shutter, 'open')
        elif limadev and getattr(limadev, '_shutter', None):
            limadev.shuttermode = oldmode
| 1.9375 | 2 |
timeweb/timewebapp/migrations/0042_timewebmodel_tags.py | snapsnap123/TimeWeb | 1 | 12761315 | # Generated by Django 3.2.4 on 2021-06-18 04:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the nullable JSON ``tags`` field to
    # ``timewebmodel``.

    dependencies = [
        ('timewebapp', '0041_alter_settingsmodel_background_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='timewebmodel',
            name='tags',
            field=models.JSONField(blank=True, null=True),
        ),
    ]
| 1.554688 | 2 |
hardly/handlers/distgit.py | jpopelka/hardly | 0 | 12761316 | <filename>hardly/handlers/distgit.py
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import re
from logging import getLogger
from os import getenv
from re import fullmatch
from typing import Optional
from hardly.handlers.abstract import TaskName
from ogr.abstract import GitProject, PullRequest
from packit.api import PackitAPI
from packit.config.job_config import JobConfig
from packit.config.package_config import PackageConfig
from packit.local_project import LocalProject
from packit_service.worker.events import MergeRequestGitlabEvent, PipelineGitlabEvent
from packit_service.worker.handlers import JobHandler
from packit_service.worker.handlers.abstract import (
reacts_to,
)
from packit_service.worker.reporting import StatusReporter, BaseCommitStatus
from packit_service.worker.result import TaskResults
logger = getLogger(__name__)
# @configured_as(job_type=JobType.dist_git_pr)  # Requires a change in packit
@reacts_to(event=MergeRequestGitlabEvent)
class DistGitMRHandler(JobHandler):
    """Mirror a source-git merge request into a dist-git merge request."""

    task_name = TaskName.dist_git_pr

    def __init__(
        self,
        package_config: PackageConfig,
        job_config: JobConfig,
        event: dict,
    ):
        """Cache the relevant fields of the GitLab MR webhook payload."""
        super().__init__(
            package_config=package_config,
            job_config=job_config,
            event=event,
        )
        self.mr_identifier = event.get("identifier")
        self.mr_title = event.get("title")
        self.mr_description = event.get("description")
        self.mr_url = event.get("url")
        self.source_project_url = event.get("source_project_url")
        # "namespace/name" of the MR's target repository.
        self.target_repo = (
            event.get("target_repo_namespace") + "/" + event.get("target_repo_name")
        )
        self.target_repo_branch = event.get("target_repo_branch")

    def run(self) -> TaskResults:
        """
        If user creates a merge-request on the source-git repository,
        create a matching merge-request to the dist-git repository.
        """
        if not self.handle_target():
            logger.debug(
                "Not creating a dist-git MR from "
                f"{self.target_repo}:{self.target_repo_branch}"
            )
            return TaskResults(success=True, details={})
        if not self.package_config:
            logger.debug("No package config found.")
            return TaskResults(success=True, details={})
        logger.debug(f"About to create a dist-git MR from source-git MR {self.mr_url}")
        # Check out the MR's head commit of the source project locally.
        source_project = self.service_config.get_project(url=self.source_project_url)
        self.local_project = LocalProject(
            git_project=source_project,
            ref=self.data.commit_sha,
            working_dir=self.service_config.command_handler_work_dir,
        )
        self.api = PackitAPI(
            config=self.service_config,
            package_config=self.package_config,
            upstream_local_project=self.local_project,
        )
        dg_mr_info = f"""###### Info for package maintainer
This MR has been automatically created from
[this source-git MR]({self.mr_url}).
Please review the contribution and once you are comfortable with the content,
you should trigger a CI pipeline run via `Pipelines → Run pipeline`."""
        dg_mr = self.api.sync_release(
            version=self.api.up.get_specfile_version(),
            add_new_sources=False,
            title=self.mr_title,
            description=f"{self.mr_description}\n\n---\n{dg_mr_info}",
            sync_default_files=False,
            # we rely on this in PipelineHandler below
            local_pr_branch_suffix=f"src-{self.mr_identifier}",
        )
        if dg_mr:
            # Link the created dist-git MR back on the source-git MR.
            comment = f"""[Dist-git MR #{dg_mr.id}]({dg_mr.url})
has been created for sake of triggering the downstream checks.
It ensures that your contribution is valid and can be incorporated in CentOS Stream
as dist-git is still the authoritative source for the distribution.
We want to run checks there only so they don't need to be reimplemented in source-git as well."""
            self.project.get_pr(int(self.mr_identifier)).comment(comment)
        return TaskResults(success=True)

    def handle_target(self) -> bool:
        """Tell if a target repo and branch pair of an MR should be handled or ignored."""
        handled_targets = self.service_config.gitlab_mr_targets_handled
        # If nothing is configured, all targets are handled.
        if not handled_targets:
            return True
        # Each configured target is a pair of regexes; an unset regex
        # matches anything (".+").
        for target in handled_targets:
            if re.fullmatch(target.repo or ".+", self.target_repo) and re.fullmatch(
                target.branch or ".+", self.target_repo_branch
            ):
                return True
        return False
@reacts_to(event=PipelineGitlabEvent)
class PipelineHandler(JobHandler):
    """Report dist-git CI pipeline status changes back on the source-git MR."""

    task_name = TaskName.pipeline

    def __init__(
        self,
        package_config: PackageConfig,
        job_config: JobConfig,
        event: dict,
    ):
        """Cache the relevant fields of the GitLab pipeline webhook payload."""
        super().__init__(
            package_config=package_config,
            job_config=job_config,
            event=event,
        )
        # It would ideally be taken from package_config.dist_git_namespace
        # instead of from getenv, but package_config is None because there's no config file.
        # NOTE(review): getenv() returns None if DISTGIT_NAMESPACE is unset,
        # making .replace() raise AttributeError — confirm the env var is
        # guaranteed by the deployment.
        self.src_git_namespace: str = getenv("DISTGIT_NAMESPACE").replace(
            "/rpms", "/src"
        )
        # project name is expected to be the same in dist-git and src-git
        self.src_git_name: str = event["project_name"]
        # branch name
        self.git_ref: str = event["git_ref"]
        self.status: str = event["status"]
        self.detailed_status: str = event["detailed_status"]
        self.pipeline_url: str = (
            f"{event['project_url']}/-/pipelines/{event['pipeline_id']}"
        )
        # lazy
        self._src_git_project: Optional[GitProject] = None
        self._src_git_mr_id: Optional[int] = None
        self._src_git_mr: Optional[PullRequest] = None
        self._status_reporter: Optional[StatusReporter] = None

    @property
    def src_git_mr_id(self) -> Optional[int]:
        """
        https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#pipeline-events
        suggests, there's a merge_request field containing relation to the (dist-git) MR.
        Sadly, it's not always true, as in our staging repos,
        in which case the merge_request info is empty.
        Luckily, we've stored the src-git MR in the branch name (self.git_ref)
        from which the dist-git MR is created.
        See how we set local_pr_branch_suffix in DistGitMRHandler.run()
        :return: src-git MR number or None if self.git_ref doesn't contain it
        """
        if not self._src_git_mr_id:
            # git_ref is expected in a form {version}-{dist_git_branch}-src-{mr_number}
            m = fullmatch(r".+-.+-src-(\d+)", self.git_ref)
            self._src_git_mr_id = int(m[1]) if m else None
        return self._src_git_mr_id

    @property
    def src_git_project(self) -> GitProject:
        """Lazily resolved source-git project (same name, src namespace)."""
        if not self._src_git_project:
            self._src_git_project = self.project.service.get_project(
                namespace=self.src_git_namespace,
                repo=self.src_git_name,
            )
        return self._src_git_project

    @property
    def src_git_mr(self) -> PullRequest:
        """Lazily fetched source-git merge request object."""
        if not self._src_git_mr:
            self._src_git_mr = self.src_git_project.get_pr(self.src_git_mr_id)
        return self._src_git_mr

    @property
    def status_reporter(self) -> StatusReporter:
        """Lazily built reporter targeting the source-git MR's head commit."""
        if not self._status_reporter:
            self._status_reporter = StatusReporter.get_instance(
                project=self.src_git_project,
                # The head_commit is latest commit of the MR.
                # If there was a new commit pushed before the pipeline ended, the report
                # might be incorrect until the new (for the new commit) pipeline finishes.
                commit_sha=self.src_git_mr.head_commit,
                pr_id=self.src_git_mr_id,
            )
        return self._status_reporter

    def run(self) -> TaskResults:
        """
        When a dist-git MR CI Pipeline changes status, create a commit
        status in the original src-git MR with a link to the Pipeline.
        """
        if not self.src_git_mr_id:
            logger.debug("Not a source-git related pipeline")
            return TaskResults(success=True, details={})
        # Map GitLab pipeline states onto our commit-status values.
        pipeline_status_to_base_commit_status = {
            "success": BaseCommitStatus.success,
            "failed": BaseCommitStatus.failure,
            "pending": BaseCommitStatus.pending,
            "running": BaseCommitStatus.running,
        }
        # Our account(s) have no access (unless it's manually added) into the fork repos,
        # to set the commit status (which would look like a Pipeline result)
        # so the status reporter fallbacks to adding a commit comment.
        # To not pollute MRs with too many comments, we might later skip
        # the 'Pipeline is pending/running' events.
        self.status_reporter.set_status(
            state=pipeline_status_to_base_commit_status[self.status],
            description=f"Changed status to {self.detailed_status}.",
            check_name="Dist-git MR CI Pipeline",
            url=self.pipeline_url,
        )
        return TaskResults(success=True, details={})
| 1.828125 | 2 |
etl_server/blueprint.py | datopian/ckan-ng-harvest-app | 1 | 12761317 | from flask import Blueprint, request
from .controllers import Controllers
def make_blueprint(db_connection_string=None, configuration=None):  # noqa
    """Create the ``etl_server`` Flask blueprint.

    :param db_connection_string: connection string handed to the
        Controllers layer.
    :param configuration: optional dict of settings for the Controllers
        layer (a fresh empty dict is used when omitted).
    :return: a configured :class:`flask.Blueprint`.
    """
    # Build a fresh dict per call instead of the shared mutable default
    # argument the previous version used.
    if configuration is None:
        configuration = {}

    controllers = Controllers(configuration=configuration,
                              connection_string=db_connection_string)

    # Create instance
    blueprint = Blueprint('etl_server', 'etl_server')

    def query_pipelines_():
        return controllers.query_pipelines()

    def configuration_():
        return controllers.configuration()

    def edit_pipeline_():
        if request.method == 'POST':
            body = request.json
            id = body.get('id')
            return controllers.create_or_edit_pipeline(id, body)
        else:
            # Unreachable in practice: the route only accepts POST.
            return {}

    def query_pipeline_(id):
        return controllers.query_pipeline(id)

    def delete_pipeline_(id):
        return controllers.delete_pipeline(id)

    def start_pipeline_(id):
        return controllers.start_pipeline(id)

    # Register routes.  Werkzeug requires URL rules to start with a leading
    # slash; the previous bare rules ('pipelines', ...) raise
    # "urls must start with a leading slash" at registration time.
    blueprint.add_url_rule(
        '/pipelines', 'query_pipelines', query_pipelines_, methods=['GET'])
    blueprint.add_url_rule(
        '/pipeline', 'edit_pipeline', edit_pipeline_, methods=['POST'])
    blueprint.add_url_rule(
        '/pipeline/<id>', 'query_pipeline', query_pipeline_, methods=['GET'])
    blueprint.add_url_rule(
        '/pipeline/start/<id>', 'start_pipeline', start_pipeline_, methods=['POST'])
    blueprint.add_url_rule(
        '/pipeline/<id>', 'delete_pipeline', delete_pipeline_, methods=['DELETE'])
    blueprint.add_url_rule(
        '/configuration', 'configuration', configuration_, methods=['GET'])

    # Return blueprint
    return blueprint
| 2.609375 | 3 |
Limpieza de datos/transformdata_computrabajo.py | richardriverag/Analysisemployability | 0 | 12761318 | <gh_stars>0
import datetime
import argparse
import json
# Spanish month names observed in the scraped data mapped to 2-digit numbers.
_MONTHS = {'enero': '01', 'febrero': '02', 'marzo': '03'}


def _parse_publicado(fecha, anio='2020'):
    """Parse a scraped ``publicado`` string (e.g. '12 marzo') into 'YYYY-MM-DD'.

    Only the first two tokens are inspected: a numeric token is the day, a
    month-name token is mapped via ``_MONTHS``.  Unknown month names fall
    back to the fixed date ``anio``-03-27, mirroring the original scraper
    behaviour.  Day/month state no longer leaks between rows (each call
    starts from empty values).
    """
    dia = ''
    mes = ''
    for token in fecha.split()[:2]:
        try:
            int(token)
            dia = token
        except ValueError:
            if token in _MONTHS:
                mes = _MONTHS[token]
            else:
                mes = '03'
                dia = '27'
    return anio + '-' + mes + '-' + dia


def main(args):
    """Normalise the scraped Computrabajo dump into ISO-dated records.

    Reads ``data_computrabajo.json`` from the working directory and writes
    ``data_computrabajo_clean.json`` with the ``publicado`` field converted
    to 'YYYY-MM-DD'.  ``args`` is accepted for CLI compatibility but unused.
    """
    data = {'empleos': []}
    with open('data_computrabajo.json', encoding='utf8') as file:
        informacion = json.load(file)
    for e in informacion['empleos']:
        data['empleos'].append({
            'date_collected': e['date_collected'],
            'ciudad': e['ciudad'],
            'publicado': _parse_publicado(e['publicado']),
            'cargo': e['cargo'],
            'jornada': e['jornada'],
            'contrato': e['contrato'],
            'salario': e['salario'],
            'descripcion': e['descripcion'],
            'empresa': e['empresa'],
        })
    with open('data_computrabajo_clean.json', 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)
if __name__=='__main__':
    # CLI entry point; the -f flag is parsed but currently unused by main().
    argparser = argparse.ArgumentParser(prog='Crawler template',
                                        description='''Collects info from jobs portals.''')
    argparser.add_argument('-f',
                           help='file with URLS')
    args = argparser.parse_args()
    main(args)
| 3.0625 | 3 |
interview_challenges/barclays_codility_test/question_1.py | noelevans/playground | 1 | 12761319 | <reponame>noelevans/playground
import itertools
def solution(N):
    """Return the number of distinct orderings of the decimal digits of N."""
    digit_string = str(N)
    return len(set(itertools.permutations(digit_string)))
def main():
    # NOTE: Python 2 print statements — this script predates Python 3.
    print solution(1213)
    print solution(0)
    print solution(99999)


if __name__ == '__main__':
    main()
| 3.453125 | 3 |
rulelist/mdl/mdl_base_codes.py | HMProenca/RuleList | 8 | 12761320 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 8 16:34:06 2019
@author: gathu
"""
from functools import lru_cache
from math import log, ceil, sqrt, log2
from scipy.special import comb,perm, gammaln
from rulelist.util.extra_maths import log2_0
@lru_cache(maxsize=20000,typed=True)
def log_multinomial(cardinality, n):
    """Return log2 of the multinomial NML complexity COMP(cardinality, n).

    Memoised because the same (cardinality, n) pairs recur frequently.
    """
    return log2_0(multinomial_with_recurrence(cardinality, n))
def multinomial_with_recurrence(cardinality, n):
    """Compute the multinomial NML complexity COMP(cardinality, n).

    cardinality - number of categories of the (multi)nomial distribution
    n - number of points / samples

    The binomial complexity COMP(2, n) is summed up to a 10-digit precision
    bound and then lifted to higher cardinalities with the recurrence
    COMP(j, n) = COMP(j-1, n) + n / (j - 2) * COMP(j-2, n).
    """
    precision_digits = 10
    if cardinality == 1:
        return 1.0
    if n == 0:
        return 0
    # COMP(2, n): truncated sum of falling-factorial terms (n-1)...(n-k+1)/n^(k-1).
    total = 1.0
    term = 1.0
    bound = int(ceil(2 + sqrt(2 * n * precision_digits * log(10))))
    for k in range(1, bound + 1):
        term *= (n - k + 1) / n
        total += term
    # Lift from cardinality 2 up to the requested cardinality.
    prev, curr = 1.0, total
    for j in range(3, cardinality + 1):
        prev, curr = curr, curr + (n * prev) / (j - 2)
    return curr
@lru_cache(maxsize=20000, typed=True)
def universal_code_integers(value: int) -> float:
    """Universal code length (in bits) of a non-negative integer.

    Sums the iterated logarithms log2(value), log2(log2(value)), ... while
    the terms stay above a small threshold, plus the normalising constant
    log2(2.865064).  Returns 0 for value == 0; raises ValueError for
    negative input.
    """
    if value < 0:
        raise ValueError('n should be larger than 0. The value was: {}'.format(value))
    if value == 0:
        return 0
    total = log2(2.865064)
    term = float(value)
    while True:
        term = log2(term)
        if term < 0.000001:
            break
        total += term
    return total
@lru_cache(maxsize=20000, typed=True)
def log2_gamma_half(n: int):
    """Return log2(Gamma(n / 2)) for n > 0, and 0 otherwise."""
    if n <= 0:
        return 0
    ln2 = 0.6931471805599453  # natural log of 2, converts gammaln to base 2
    return gammaln(n / 2) / ln2
def universal_code_integers_maximum(n: int, maximum : int) -> float:
    """Universal integer code length (bits) when a maximum value is known.

    The probability mass that the plain universal code assigns to integers
    above *maximum* is redistributed uniformly over 1..maximum (maximum
    entropy style), and the code length of *n* under the adjusted
    distribution is returned.
    """
    mass_used = 0.0
    for candidate in range(1, maximum + 1):
        mass_used += 2 ** -universal_code_integers(candidate)
    leftover_share = (1 - mass_used) / maximum
    probability_n = 2 ** -universal_code_integers(n) + leftover_share
    return -log2(probability_n)
def uniform_code(n: int) -> float:
    """Code length (bits) of a uniform code over n outcomes; 0 when n == 0."""
    if n == 0:
        return 0
    return log2(n)
def uniform_combination_code(n: int, maximum: int) -> float:
    """Uniform code length (bits) over all n-combinations of *maximum* items.

    Use when the order of the chosen elements does not matter.
    """
    number_of_combinations = comb(maximum, n)
    return log2(number_of_combinations)
def uniform_permutation_code(n: int, maximum: int) -> float:
    """Uniform code length (bits) over all n-permutations of *maximum* items.

    Use when the order of the chosen elements matters.
    """
    number_of_permutations = perm(maximum, n)
    return log2(number_of_permutations)
backend/org/models.py | arturfelipe/condobus | 0 | 12761321 | <reponame>arturfelipe/condobus
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Organization(models.Model):
    """An organization with a name, optional logo, description and rules text."""

    name = models.CharField(_('Name'), max_length=200)
    logo = models.ImageField(null=True, blank=True)
    description = models.TextField(_('Description'), null=True, blank=True)
    rules = models.TextField(_('Rules'), null=True, blank=True)

    class Meta:
        verbose_name = _('Organization')
        verbose_name_plural = _('Organizations')

    def __str__(self):
        return self.name
| 2.140625 | 2 |
examples/campaigns.py | AbdulMoeed-140212/mailwizz-python-sdk | 0 | 12761322 | <reponame>AbdulMoeed-140212/mailwizz-python-sdk
# Runnable MailWizz API example: each section below performs a live call
# against the configured MailWizz instance.  Replace the *_UID placeholders
# and the <NAME>/<EMAIL> fields before running.
from datetime import datetime, timedelta
from setup_api import setup
from mailwizz.endpoint.campaigns import Campaigns
"""
SETUP THE API
"""
setup()
"""
CREATE THE ENDPOINT
"""
endpoint = Campaigns()
"""
GET ALL ITEMS
"""
response = endpoint.get_campaigns(page=1, per_page=10)
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
GET ONE ITEM
"""
response = endpoint.get_campaign('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
CREATE ONE CAMPAIGN
"""
# Schedules the campaign 10 hours from now (customer's timezone applies).
response = endpoint.create({
    'name': 'My API Campaign', # required
    'type': 'regular', # optional: regular or autoresponder
    'from_name': '<NAME>', # required
    'from_email': '<EMAIL>', # required
    'subject': 'Hey, i am testing the campaigns via API', # required
    'reply_to': '<EMAIL>', # required
    'send_at': (datetime.now() + timedelta(hours=10)).strftime('%Y-%m-%d %H:%M:%S'),
    # required, this will use the timezone which customer selected
    'list_uid': 'LIST_UID', # required
    # 'segment_uid' : 'SEGMENT-UNIQUE-ID',# optional, only to narrow down
    # optional block, defaults are shown
    'options': {
        'url_tracking': 'no', # yes | no
        'json_feed': 'no', # yes | no
        'xml_feed': 'no', # yes | no
        'plain_text_email': 'yes', # yes | no
        'email_stats': None, # a valid email address where we should send the stats after campaign done
        # - if autoresponder uncomment bellow:
        # 'autoresponder_event' : 'AFTER-SUBSCRIBE', # AFTER-SUBSCRIBE or AFTER-CAMPAIGN-OPEN
        # 'autoresponder_time_unit' : 'hour', # minute, hour, day, week, month, year
        # 'autoresponder_time_value' : 1, # 1 hour after event
        # 'autoresponder_open_campaign_id' : 1, # INT id of campaign, only if event is AFTER-CAMPAIGN-OPEN,
        # - if this campaign is advanced recurring, you can set a cron job style frequency.
        # - please note that this applies only for regular campaigns.
        # 'cronjob' : '0 0 * * *', # once a day
        # 'cronjob_enabled' : 1, # 1 or 0
    },
    # required block, archive or template_uid or content : required.
    # the templates examples can be found here: Examples
    'template': {
        # 'archive' : open('template-example.zip', 'r').read(),
        'template_uid': 'TEMPLATE_UID',
        # 'content' : open('template-example.html', 'rb').read(),
        'inline_css': 'no', # yes | no
        # 'plain_text' : None, # leave empty to auto generate
        'auto_plain_text': 'yes', # yes | no
    },
})
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
UPDATE ONE CAMPAIGN
"""
# Same payload shape as create(), addressed by the campaign's UID.
response = endpoint.update('CAMPAIGN_UID', {
    'name': 'My API Campaign - UPDATED', # required
    'from_name': '<NAME>', # required
    'from_email': '<EMAIL>', # required
    'subject': 'Hey, i am testing the campaigns via API', # required
    'reply_to': '<EMAIL>', # required
    'send_at': (datetime.now() + timedelta(hours=10)).strftime('%Y-%m-%d %H:%M:%S'),
    # required, this will use the timezone which customer selected
    'list_uid': 'LIST_UID', # required
    # 'segment_uid' : 'SEGMENT-UNIQUE-ID',# optional, only to narrow down
    # optional block, defaults are shown
    'options': {
        'url_tracking': 'no', # yes | no
        'json_feed': 'no', # yes | no
        'xml_feed': 'no', # yes | no
        'plain_text_email': 'yes', # yes | no
        'email_stats': None, # a valid email address where we should send the stats after campaign done
        # - if autoresponder uncomment bellow:
        # 'autoresponder_event' : 'AFTER-SUBSCRIBE', # AFTER-SUBSCRIBE or AFTER-CAMPAIGN-OPEN
        # 'autoresponder_time_unit' : 'hour', # minute, hour, day, week, month, year
        # 'autoresponder_time_value' : 1, # 1 hour after event
        # 'autoresponder_open_campaign_id' : 1, # INT id of campaign, only if event is AFTER-CAMPAIGN-OPEN,
        # - if this campaign is advanced recurring, you can set a cron job style frequency.
        # - please note that this applies only for regular campaigns.
        # 'cronjob' : '0 0 * * *', # once a day
        # 'cronjob_enabled' : 1, # 1 or 0
    },
    # required block, archive or template_uid or content : required.
    # the templates examples can be found here: Examples
    'template': {
        # 'archive' : open('template-example.zip', 'r').read(),
        'template_uid': 'TEMPLATE_UID',
        # 'content' : open('template-example.html', 'rb').read(),
        'inline_css': 'no', # yes | no
        # 'plain_text' : None, # leave empty to auto generate
        'auto_plain_text': 'yes', # yes | no
    },
})
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
COPY ONE CAMPAIGN
"""
response = endpoint.copy('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
MARK ONE CAMPAIGN AS SENT
"""
response = endpoint.mark_sent('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
PAUSE/UNPAUSE ONE CAMPAIGN
"""
response = endpoint.pause_unpause('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
"""
DELETE ONE CAMPAIGN
"""
response = endpoint.delete('CAMPAIGN_UID')
"""
DISPLAY RESPONSE
"""
print(response.content)
| 2.75 | 3 |
src/ralph/assets/models/assets.py | pinoatrome/ralph | 1,668 | 12761323 | <filename>src/ralph/assets/models/assets.py
# -*- coding: utf-8 -*-
import datetime
import logging
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from ralph.accounts.models import Team
from ralph.admin.autocomplete import AutocompleteTooltipMixin
from ralph.assets.models.base import BaseObject
from ralph.assets.models.choices import (
ModelVisualizationLayout,
ObjectModelType
)
from ralph.lib.custom_fields.models import (
CustomFieldMeta,
WithCustomFieldsMixin
)
from ralph.lib.mixins.fields import NullableCharField
from ralph.lib.mixins.models import (
AdminAbsoluteUrlMixin,
NamedMixin,
PriceMixin,
TimeStampMixin
)
from ralph.lib.permissions import PermByFieldMixin
from ralph.lib.permissions.models import PermissionsBase
logger = logging.getLogger(__name__)
class AssetHolder(
    AdminAbsoluteUrlMixin,
    NamedMixin.NonUnique,
    TimeStampMixin,
    models.Model
):
    """Named (non-unique), timestamped entity that holds assets."""
    pass
class BusinessSegment(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
    """Uniquely named business segment; referenced by Service."""
    pass
class ProfitCenter(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
    """Uniquely named profit center with a free-text description; referenced by Service."""
    description = models.TextField(blank=True)
class Environment(
    AdminAbsoluteUrlMixin, NamedMixin, TimeStampMixin, models.Model
):
    """Uniquely named, timestamped environment; linked to services via ServiceEnvironment."""
    pass
class Service(
    PermByFieldMixin,
    AdminAbsoluteUrlMixin,
    NamedMixin,
    TimeStampMixin,
    models.Model
):
    """A service with owners, cost/profit classification and environments."""

    # Fixme: let's do service catalog replacement from that
    _allow_in_dashboard = True

    active = models.BooleanField(default=True)
    uid = NullableCharField(max_length=40, unique=True, blank=True, null=True)
    profit_center = models.ForeignKey(ProfitCenter, null=True, blank=True)
    business_segment = models.ForeignKey(BusinessSegment, null=True, blank=True)
    cost_center = models.CharField(max_length=100, blank=True)
    environments = models.ManyToManyField(
        'Environment', through='ServiceEnvironment'
    )
    business_owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='services_business_owner',
        blank=True,
    )
    technical_owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        related_name='services_technical_owner',
        blank=True,
    )
    support_team = models.ForeignKey(
        Team, null=True, blank=True, related_name='services',
    )

    def __str__(self):
        return '{}'.format(self.name)

    @classmethod
    def get_autocomplete_queryset(cls):
        """Limit admin autocomplete suggestions to active services."""
        return cls._default_manager.filter(active=True)
class ServiceEnvironment(
    AdminAbsoluteUrlMixin,
    AutocompleteTooltipMixin,
    BaseObject
):
    """Unique (service, environment) pair; the through model of Service.environments."""

    _allow_in_dashboard = True

    service = models.ForeignKey(Service)
    environment = models.ForeignKey(Environment)

    # Extra fields shown in the admin autocomplete tooltip.
    autocomplete_tooltip_fields = [
        'service__business_owners',
        'service__technical_owners',
        'service__support_team',
    ]

    def __str__(self):
        return '{} - {}'.format(self.service.name, self.environment.name)

    class Meta:
        unique_together = ('service', 'environment')
        ordering = ('service__name', 'environment__name')

    @property
    def service_name(self):
        """Name of the related service."""
        return self.service.name

    @property
    def service_uid(self):
        """UID of the related service."""
        return self.service.uid

    @property
    def environment_name(self):
        """Name of the related environment."""
        return self.environment.name

    @classmethod
    def get_autocomplete_queryset(cls):
        """Limit admin autocomplete suggestions to pairs of active services."""
        return cls._default_manager.filter(service__active=True)
class ManufacturerKind(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
    """Uniquely named kind/classification of a Manufacturer."""
    pass
class Manufacturer(
    AdminAbsoluteUrlMixin,
    NamedMixin,
    TimeStampMixin,
    models.Model
):
    """Uniquely named, timestamped hardware manufacturer with an optional kind."""

    _allow_in_dashboard = True

    manufacturer_kind = models.ForeignKey(
        ManufacturerKind, verbose_name=_('manufacturer kind'),
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
# Combined metaclass so AssetModel can mix custom-field support
# (CustomFieldMeta) with the permissions machinery (PermissionsBase).
AssetModelMeta = type('AssetModelMeta', (CustomFieldMeta, PermissionsBase), {})
class AssetModel(
    PermByFieldMixin,
    NamedMixin.NonUnique,
    TimeStampMixin,
    AdminAbsoluteUrlMixin,
    WithCustomFieldsMixin,
    models.Model,
    metaclass=AssetModelMeta
):
    """A hardware model (manufacturer + name) with visualization metadata."""

    # TODO: should type be determined based on category?
    _allow_in_dashboard = True

    type = models.PositiveIntegerField(
        verbose_name=_('type'), choices=ObjectModelType(),
    )
    manufacturer = models.ForeignKey(
        Manufacturer, on_delete=models.PROTECT, blank=True, null=True
    )
    category = TreeForeignKey(
        'Category', null=True, related_name='models'
    )
    power_consumption = models.PositiveIntegerField(
        verbose_name=_("Power consumption"),
        default=0,
    )
    height_of_device = models.FloatField(
        verbose_name=_("Height of device"),
        default=0,
        validators=[MinValueValidator(0)],
    )
    cores_count = models.PositiveIntegerField(
        verbose_name=_("Cores count"),
        default=0,
    )
    visualization_layout_front = models.PositiveIntegerField(
        verbose_name=_("visualization layout of front side"),
        choices=ModelVisualizationLayout(),
        default=ModelVisualizationLayout().na.id,
        blank=True,
    )
    visualization_layout_back = models.PositiveIntegerField(
        verbose_name=_("visualization layout of back side"),
        choices=ModelVisualizationLayout(),
        default=ModelVisualizationLayout().na.id,
        blank=True,
    )
    # Used in the visualization Data Center as is_blade
    has_parent = models.BooleanField(default=False)

    class Meta:
        verbose_name = _('model')
        verbose_name_plural = _('models')

    def __str__(self):
        # Include the category prefix only when a category is set.
        if self.category_id:
            return '[{}] {} {}'.format(
                self.category, self.manufacturer, self.name
            )
        else:
            return '{} {}'.format(
                self.manufacturer, self.name
            )

    def _get_layout_class(self, field):
        """Map a layout choice id to its CSS class ('' when undefined)."""
        item = ModelVisualizationLayout.from_id(field)
        return getattr(item, 'css_class', '')

    def get_front_layout_class(self):
        """CSS class for the front-side visualization layout."""
        return self._get_layout_class(self.visualization_layout_front)

    def get_back_layout_class(self):
        """CSS class for the back-side visualization layout."""
        return self._get_layout_class(self.visualization_layout_back)
class Category(
    AdminAbsoluteUrlMixin,
    MPTTModel,
    NamedMixin.NonUnique,
    TimeStampMixin,
    models.Model
):
    """Hierarchical (MPTT) asset category with a default depreciation rate."""

    _allow_in_dashboard = True

    code = models.CharField(max_length=4, blank=True, default='')
    parent = TreeForeignKey(
        'self',
        null=True,
        blank=True,
        related_name='children',
        db_index=True
    )
    imei_required = models.BooleanField(default=False)
    allow_deployment = models.BooleanField(default=False)
    show_buyout_date = models.BooleanField(default=False)
    default_depreciation_rate = models.DecimalField(
        blank=True,
        decimal_places=2,
        default=settings.DEFAULT_DEPRECIATION_RATE,
        help_text=_(
            'This value is in percentage.'
            ' For example value: "100" means it depreciates during a year.'
            ' Value: "25" means it depreciates during 4 years, and so on... .'
        ),
        max_digits=5,
    )

    class Meta:
        verbose_name = _('category')
        verbose_name_plural = _('categories')

    class MPTTMeta:
        order_insertion_by = ['name']

    def __str__(self):
        return self.name

    def get_default_depreciation_rate(self, category=None):
        """Return this category's depreciation rate, falling back up the tree.

        Walks towards the root until a category with a non-zero rate is
        found; returns 0 when none is set anywhere on the path.
        """
        if category is None:
            category = self
        if category.default_depreciation_rate:
            return category.default_depreciation_rate
        elif category.parent:
            return self.get_default_depreciation_rate(category.parent)
        return 0
class AssetLastHostname(models.Model):
    """Per (prefix, postfix) counter used to generate sequential hostnames."""
    prefix = models.CharField(max_length=30, db_index=True)
    counter = models.PositiveIntegerField(default=1)
    postfix = models.CharField(max_length=30, db_index=True)

    class Meta:
        unique_together = ('prefix', 'postfix')

    def formatted_hostname(self, fill=5):
        """Render the hostname, zero-padding the counter to *fill* digits,
        e.g. prefix='xs', counter=7, fill=5 -> 'xs00007'."""
        return '{prefix}{counter:0{fill}}{postfix}'.format(
            prefix=self.prefix,
            counter=int(self.counter),
            fill=fill,
            postfix=self.postfix,
        )

    @classmethod
    # TODO: select_for_update
    def increment_hostname(cls, prefix, postfix=''):
        """Bump (or create) the counter row and return a fresh instance."""
        obj, created = cls.objects.get_or_create(
            prefix=prefix,
            postfix=postfix,
        )
        if not created:
            # F() avoid race condition problem
            obj.counter = models.F('counter') + 1
            obj.save()
            # re-fetch so `counter` holds a concrete int, not an F() expression
            return cls.objects.get(pk=obj.pk)
        else:
            return obj

    @classmethod
    def get_next_free_hostname(
        cls, prefix, postfix, fill=5, availability_checker=None, _counter=1
    ):
        """Return the next hostname accepted by *availability_checker*,
        recursing with a larger offset until one is free.  Does not
        persist the counter."""
        try:
            last_hostname = cls.objects.get(prefix=prefix, postfix=postfix)
        except cls.DoesNotExist:
            last_hostname = cls(prefix=prefix, postfix=postfix, counter=0)
        last_hostname.counter += _counter
        hostname = last_hostname.formatted_hostname(fill=fill)
        if availability_checker is None or availability_checker(hostname):
            return hostname
        else:
            return cls.get_next_free_hostname(
                prefix, postfix, fill, availability_checker, _counter + 1
            )

    def __str__(self):
        return self.formatted_hostname()
class BudgetInfo(
    AdminAbsoluteUrlMixin,
    NamedMixin,
    TimeStampMixin,
    models.Model
):
    """Named budget a purchase can be booked against (see Asset.budget_info)."""

    class Meta:
        verbose_name = _('Budget info')
        verbose_name_plural = _('Budgets info')

    def __str__(self):
        return self.name
class Asset(AdminAbsoluteUrlMixin, PriceMixin, BaseObject):
    """Physical or financial asset: identification, purchase and
    depreciation data."""
    model = models.ForeignKey(
        AssetModel, related_name='assets', on_delete=models.PROTECT
    )
    # TODO: unify hostname for DCA, VirtualServer, Cluster and CloudHost
    # (use another model?)
    hostname = NullableCharField(
        blank=True,
        default=None,
        max_length=255,
        null=True,
        verbose_name=_('hostname'),  # TODO: unique
    )
    # Serial number; NullableCharField stores '' as NULL so the unique
    # constraint allows many assets without a SN.
    sn = NullableCharField(
        blank=True,
        max_length=200,
        null=True,
        verbose_name=_('SN'),
        unique=True,
    )
    barcode = NullableCharField(
        blank=True,
        default=None,
        max_length=200,
        null=True,
        unique=True,
        verbose_name=_('barcode')
    )
    # Internal inventory number.
    niw = NullableCharField(
        blank=True,
        default=None,
        max_length=200,
        null=True,
        verbose_name=_('inventory number'),
    )
    required_support = models.BooleanField(default=False)
    order_no = models.CharField(
        verbose_name=_('order number'),
        blank=True,
        max_length=50,
        null=True,
    )
    invoice_no = models.CharField(
        verbose_name=_('invoice number'),
        blank=True,
        db_index=True,
        max_length=128,
        null=True,
    )
    invoice_date = models.DateField(blank=True, null=True)
    # to discuss: foreign key?
    provider = models.CharField(
        blank=True,
        max_length=100,
        null=True,
    )
    # Yearly depreciation percentage (see help_text).
    depreciation_rate = models.DecimalField(
        blank=True,
        decimal_places=2,
        default=settings.DEFAULT_DEPRECIATION_RATE,
        help_text=_(
            'This value is in percentage.'
            ' For example value: "100" means it depreciates during a year.'
            ' Value: "25" means it depreciates during 4 years, and so on... .'
        ),
        max_digits=5,
    )
    force_depreciation = models.BooleanField(
        help_text=(
            'Check if you no longer want to bill for this asset'
        ),
        default=False,
    )
    # Explicit end date; overrides the computed depreciation window.
    depreciation_end_date = models.DateField(blank=True, null=True)
    buyout_date = models.DateField(blank=True, null=True, db_index=True)
    task_url = models.URLField(
        blank=True,
        help_text=('External workflow system URL'),
        max_length=2048,
        null=True,
    )
    budget_info = models.ForeignKey(
        BudgetInfo,
        blank=True,
        default=None,
        null=True,
        on_delete=models.PROTECT,
    )
    property_of = models.ForeignKey(
        AssetHolder,
        on_delete=models.PROTECT,
        null=True,
        blank=True,
    )
    start_usage = models.DateField(
        blank=True,
        null=True,
        help_text=(
            'Fill it if date of first usage is different then date of creation'
        )
    )
    def __str__(self):
        return self.hostname or ''

    def calculate_buyout_date(self):
        """
        Get buyout date.

        Calculate buyout date:
        invoice_date + depreciation_rate months + custom buyout date delay

        Returns:
            Deprecation date, or None when no invoice date is set.
        """
        if self.depreciation_end_date:
            return self.depreciation_end_date
        elif self.invoice_date:
            months = self.get_depreciation_months() + 1 + \
                settings.ASSET_BUYOUT_DELAY_MONTHS
            return self.invoice_date + relativedelta(months=months)
        else:
            return None

    def get_depreciation_months(self):
        # depreciation_rate is a yearly percentage, so e.g. 25 -> 48 months.
        return int(
            (1 / (self.depreciation_rate / 100) * 12)
            if self.depreciation_rate else 0
        )
def is_depreciated(self, date=None):
date = date or datetime.date.today()
if self.force_depreciation or not self.invoice_date:
return True
if self.depreciation_end_date:
deprecation_date = self.deprecation_end_date
else:
deprecation_date = self.invoice_date + relativedelta(
months=self.get_depreciation_months(),
)
return deprecation_date < date
def get_depreciated_months(self):
# DEPRECATED
# BACKWARD_COMPATIBILITY
return self.get_depreciation_months()
def is_deprecated(self, date=None):
# DEPRECATED
# BACKWARD_COMPATIBILITY
return self.is_depreciated()
def _liquidated_at(self, date):
liquidated_history = self.get_history().filter(
new_value='liquidated',
field_name='status',
).order_by('-date')[:1]
return liquidated_history and liquidated_history[0].date.date() <= date
    def clean(self):
        # An asset must be identifiable by at least one of SN / barcode.
        if not self.sn and not self.barcode:
            error_message = [_('SN or BARCODE field is required')]
            raise ValidationError(
                {
                    'sn': error_message,
                    'barcode': error_message
                }
            )

    def save(self, *args, **kwargs):
        # if you save barcode as empty string (instead of None) you could have
        # only one asset with empty barcode (because of `unique` constraint)
        # if you save barcode as None you could have many assets with empty
        # barcode (because the `unique` constraint is skipped)
        for unique_field in ['barcode', 'sn']:
            value = getattr(self, unique_field, None)
            if value == '':
                value = None
            setattr(self, unique_field, value)
        # Derive the buyout date the first time it is missing.
        if not self.buyout_date:
            self.buyout_date = self.calculate_buyout_date()
        return super(Asset, self).save(*args, **kwargs)
| 1.796875 | 2 |
obeflix_back/serializer.py | luigiMinardi/alurachallenge-backend | 3 | 12761324 | <filename>obeflix_back/serializer.py
from rest_framework import serializers
from obeflix_back.models import Video, Categoria
class VideoSerializer(serializers.ModelSerializer):
    """Serializes every field of ``Video``."""
    class Meta:
        model = Video
        fields = '__all__'


class CategoriaSerializer(serializers.ModelSerializer):
    """Serializes every field of ``Categoria``."""
    class Meta:
        model = Categoria
        fields = '__all__'


class ListaVideoPorCategoriaSerializer(serializers.ModelSerializer):
    """Serializes ``Video`` rows when listing videos by category."""
    class Meta:
        model = Video
        # BUG FIX: stray dataset columns ("| 2.15625 | 2 |") had been fused
        # onto this line, making the module unparseable.
        fields = '__all__'
ejercicios_resueltos/t10/t10ejer05.py | workready/pythonbasic | 0 | 12761325 | <filename>ejercicios_resueltos/t10/t10ejer05.py
import numpy as np
# Build an 8x8 chessboard pattern: 1.0 on squares where row+column is odd,
# 0.0 elsewhere (same result as the classic two-slice assignment into a
# zero matrix).
indices = np.arange(8)
tablero = (np.add.outer(indices, indices) % 2).astype(float)
print(tablero)
app/protostory/portal/views.py | mizikanikyuuri/theStory | 0 | 12761326 | <filename>app/protostory/portal/views.py<gh_stars>0
from django.shortcuts import render
from django.shortcuts import get_object_or_404,render
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login ,logout
from django.http import JsonResponse, Http404
from django.views.decorators.csrf import ensure_csrf_cookie
import json
import logging
@ensure_csrf_cookie
def home(request):
    # Landing page; ensure_csrf_cookie guarantees the CSRF cookie is set
    # so the front-end can POST to userLogin later.
    return render(request, 'portal/home.html')
def userLogin(request):
    """Authenticate (or lazily register) a user from POSTed credentials.

    Returns JSON ``{'login': 'success'}`` on success, otherwise
    ``{'login': 'fail', 'detail': <reason>}``.
    """
    if request.method != 'POST':
        return JsonResponse({
            'login': 'fail',
            'detail': 'format invalid'
        })
    username = request.POST["username"]
    password = request.POST["password"]
    user = authenticate(username=username, password=password)
    # If authentication failed, try to create the user and re-authenticate.
    if user is None:
        try:
            User.objects.create_user(username=username, password=password)
        except Exception as e:
            # BUG FIX: previously `'detail': print(e)` printed the error to
            # stdout and returned None as the detail; report the message.
            return JsonResponse({
                'login': 'fail',
                'detail': str(e)
            })
        user = authenticate(username=username, password=password)
        if user is None:
            # Freshly created user still failed to authenticate (e.g. an
            # inactive-user backend); avoid crashing in login(None).
            return JsonResponse({
                'login': 'fail',
                'detail': 'authentication failed'
            })
    login(request, user)
    return JsonResponse({'login': 'success'})
def userLogout(request):
    # Ends the session unconditionally; logout() is a no-op for
    # anonymous users, so this always reports success.
    logout(request)
    return JsonResponse({'logout': 'success'})
commanderbot/ext/automod/triggers/member_updated.py | CommanderBot-Dev/commanderbot-ext | 4 | 12761327 | from dataclasses import dataclass
from typing import Optional, Type, TypeVar
from commanderbot.ext.automod import events
from commanderbot.ext.automod.automod_event import AutomodEvent
from commanderbot.ext.automod.automod_trigger import (
AutomodTrigger,
AutomodTriggerBase,
)
from commanderbot.lib import JsonObject, RolesGuard
ST = TypeVar("ST")


@dataclass
class MemberUpdated(AutomodTriggerBase):
    """
    Fires when an `on_member_update` event is received.

    This occurs when one or more of the following things change:
    - status
    - activity
    - nickname
    - roles
    - pending

    See: https://discordpy.readthedocs.io/en/stable/api.html#discord.on_member_update

    Attributes
    ----------
    roles
        The roles to match against. If empty, all roles will match.
    """

    event_types = (events.MemberUpdated,)

    # Optional guard restricting which members' updates fire this trigger.
    roles: Optional[RolesGuard] = None

    @classmethod
    def from_data(cls: Type[ST], data: JsonObject) -> ST:
        """Build the trigger from its JSON configuration."""
        roles = RolesGuard.from_field_optional(data, "roles")
        return cls(
            description=data.get("description"),
            roles=roles,
        )

    def ignore_by_role(self, event: AutomodEvent) -> bool:
        """True when the member's roles are excluded by the guard."""
        if self.roles is None:
            return False
        return self.roles.ignore(event.member)

    def ignore(self, event: AutomodEvent) -> bool:
        return self.ignore_by_role(event)


def create_trigger(data: JsonObject) -> AutomodTrigger:
    """Module-level factory used by the trigger registry."""
    return MemberUpdated.from_data(data)
| 2.40625 | 2 |
pytglib/api/functions/get_secret_chat.py | iTeam-co/pytglib | 6 | 12761328 | <reponame>iTeam-co/pytglib
from ..utils import Object
class GetSecretChat(Object):
    """
    Returns information about a secret chat by its identifier. This is an offline request

    Attributes:
        ID (:obj:`str`): ``GetSecretChat``

    Args:
        secret_chat_id (:obj:`int`):
            Secret chat identifier

    Returns:
        SecretChat

    Raises:
        :class:`telegram.Error`
    """
    ID = "getSecretChat"

    def __init__(self, secret_chat_id, extra=None, **kwargs):
        self.secret_chat_id = secret_chat_id  # int
        self.extra = extra

    @staticmethod
    def read(q: dict, *args) -> "GetSecretChat":
        # Deserialize directly from the raw TDLib dict.
        return GetSecretChat(q.get('secret_chat_id'))
| 2.515625 | 3 |
chapters/chapter6/text_generator.py | nazariinyzhnyk/Nikolenko_Deeplearning | 0 | 12761329 | import os
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, LSTM, TimeDistributed, Activation, Reshape, concatenate, Input
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, CSVLogger, Callback
# Sentinel characters marking sequence start/end and right-padding.
START_CHAR = '\b'
END_CHAR = '\t'
PADDING_CHAR = '\a'
# Character vocabulary; seeded with the sentinels + newline and extended
# from the corpus below.
chars = set([START_CHAR, '\n', END_CHAR])
input_frame = 'shakespeare_short.txt'
model_fname = 'model_keras'
output_fname = 'output.txt'
batchout_fname = 'batch_out.txt'
USE_SIMPLE_MODEL = False
# Build the character vocabulary from the (lower-cased) corpus.
with open(input_frame) as f:
    for line in f:
        chars.update(list(line.strip().lower()))
char_indicies = {c: i for i, c in enumerate(sorted(list(chars)))}
# NOTE(review): PADDING_CHAR is forced to index 0, which it already holds
# after sorting ('\a' sorts first) — presumably defensive; confirm.
char_indicies[PADDING_CHAR] = 0
indicies_to_chars = {i: c for c, i in char_indicies.items()}
num_chars = len(chars)
print(num_chars)
def get_one(i, sz):
    """Return a one-hot float vector of length *sz* with a 1 at index *i*."""
    one_hot = np.zeros(sz)
    one_hot[i] = 1
    return one_hot
# Per-character one-hot vectors; the padding character maps to all-zeros.
char_vectors = {
    c: (np.zeros(num_chars) if c == PADDING_CHAR else get_one(v, num_chars)) for c, v in char_indicies.items()
}
sentence_end_markers = set('.!?')
# Split the corpus into "sentences": runs of consecutive non-empty lines,
# terminated by a blank line or a line ending in . ! or ?.
sentences = []
current_sentence = ''
with open(input_frame, 'r') as f:
    for line in f:
        s = line.strip().lower()
        if len(s) > 0:
            current_sentence += s + '\n'
        if len(s) == 0 or s[-1] in sentence_end_markers:
            current_sentence = current_sentence.strip()
            if len(current_sentence) > 10:  # drop very short fragments
                sentences.append(current_sentence)
            current_sentence = ''
def get_matrices(sentences, max_sentence_len):
    """Vectorize *sentences* into one-hot input/target tensors.

    ``X[i, t]`` encodes character t of sentence i (wrapped in START/END
    sentinels and right-padded with PADDING_CHAR); ``y`` is ``X`` shifted
    one step left, i.e. the next-character targets.
    """
    # BUG FIX: `np.bool` (deprecated in NumPy 1.20, removed in 1.24) is
    # replaced by the builtin `bool`, which NumPy maps to the same dtype.
    X = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=bool)
    y = np.zeros((len(sentences), max_sentence_len, len(chars)), dtype=bool)
    for i, sentence in enumerate(sentences):
        char_seq = (START_CHAR + sentence + END_CHAR).ljust(max_sentence_len + 1, PADDING_CHAR)
        for t in range(max_sentence_len):
            X[i, t, :] = char_vectors[char_seq[t]]
            y[i, t, :] = char_vectors[char_seq[t + 1]]
    return X, y
# Hold out ~5% of sentences for validation.
# NOTE(review): np.random.choice here samples *with* replacement, so the
# test set may contain duplicate indices — confirm this is intended.
test_indicies = np.random.choice(range(len(sentences)), int(len(sentences) * 0.05))
sentences_train = [sentences[x] for x in set(range(len(sentences))) - set(test_indicies)]
sentences_test = [sentences[x] for x in test_indicies]
max_sentence_len = np.max([len(x) for x in sentences])
# Sort training sentences by length so each batch has similar lengths.
sentences_train = sorted(sentences_train, key=lambda x: len(x))
X_test, y_test = get_matrices(sentences_test, max_sentence_len)
batch_size = 16
print(sentences_train[1])
print(sentences_test[1])
print(X_test.shape)
def generate_batch():
    """Endlessly yield (X, y) training batches in length-sorted order."""
    while True:
        n_batches = int(len(sentences_train) / batch_size)
        for b in range(n_batches):
            batch = sentences_train[b * batch_size:(b + 1) * batch_size]
            yield get_matrices(batch, max_sentence_len)
class CharSampler(Callback):
    """Keras callback that samples text from the model during training
    at several softmax temperatures and appends it to `output_fname`."""

    def __init__(self, char_vectors, model):
        self.char_vectors = char_vectors
        self.model = model

    def on_train_begin(self, logs={}):
        # Keras callback signature; `logs` is never mutated here.
        self.epoch = 0
        if os.path.isfile(output_fname):
            os.remove(output_fname)

    def sample(self, preds, temperature=1.0):
        """Draw one character index from `preds` rescaled by `temperature`
        (lower temperature -> greedier sampling)."""
        preds = np.asarray(preds).astype('float64')
        preds = np.log(preds) / temperature
        exp_preds = np.exp(preds)
        preds = exp_preds / np.sum(exp_preds)
        probas = np.random.multinomial(1, preds, 1)
        return np.argmax(probas)

    def sample_one(self, T):
        """Generate one text (<= 500 chars) at temperature *T*, feeding the
        growing prefix back through the model each step."""
        result = START_CHAR
        while len(result) < 500:
            Xsampled = np.zeros((1, len(result), num_chars))  # max_sentence_len
            for t, c in enumerate(list(result)):
                Xsampled[0, t, :] = self.char_vectors[c]
            ysampled = self.model.predict(Xsampled, batch_size=1)[0, :]
            yv = ysampled[len(result) - 1, :]
            selected_char = indicies_to_chars[self.sample(yv, T)]
            if selected_char == END_CHAR:
                break
            result = result + selected_char
        return result

    def on_epoch_end(self, epoch, logs=None):
        self.epoch = self.epoch + 1
        if self.epoch % 1 == 0:
            print('\nEpoch: %d text sampling:' % self.epoch)
            with open(output_fname, 'a') as outf:
                outf.write('\n========= Epoch %d =========' % self.epoch)
                for T in [.3, .5, .7, .9, 1.1]:
                    print('\tsampling, T= %.1f...' % T)
                    for _ in range(5):
                        self.model.reset_states()
                        res = self.sample_one(T)
                        outf.write('\nT=%.1f \n%s \n' % (T, res[1:]))

    def on_batch_end(self, batch, logs={}):
        # Sample every 10 batches as well, for finer-grained inspection.
        if (batch + 1) % 10 == 0:
            print('\nBatch %d text sampling: ' % batch)
            with open(output_fname, 'a') as outf:
                outf.write('\n========= Batch %d =========' % batch)
                for T in [.3, .5, .7, .9, 1.1]:
                    print('\tsampling, T= %.1f...' % T)
                    for _ in range(5):
                        self.model.reset_states()
                        res = self.sample_one(T)
                        outf.write(res + '\n')
class LossHistory(Callback):
    """Keras callback that records per-batch loss/accuracy and flushes the
    last 100 values to `batchout_fname` every 100 batches."""

    def on_train_begin(self, logs={}):
        self.loss = []
        self.acc = []

    def on_batch_end(self, batch, logs={}):
        self.loss.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        if (batch + 1) % 100 == 0:
            with open(batchout_fname, 'a') as outf:
                # Write the most recent 100 entries with their batch index.
                for i in range(100):
                    outf.write('%d\t%.6f\t%.6f\n' %
                               (batch + i - 99, self.loss[i - 100], self.acc[i - 100]))
# ---- Model definition ----
if USE_SIMPLE_MODEL:
    # Single-layer LSTM character language model.
    vec = Input(shape=(None, num_chars))
    l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
    l1_d = Dropout(0.2)(l1)
    dense = TimeDistributed(Dense(num_chars))(l1_d)
    output_res = Activation('softmax')(dense)
    # FIX: keras.Model takes `inputs=`, not `input=`.
    model = Model(inputs=vec, outputs=output_res)
else:
    # Three stacked LSTMs with skip connections from the input; the final
    # dense layer sees the concatenation of all three LSTM outputs.
    vec = Input(shape=(None, num_chars))
    l1 = LSTM(128, activation='tanh', return_sequences=True)(vec)
    l1_d = Dropout(0.2)(l1)
    input2 = concatenate([vec, l1_d])
    l2 = LSTM(128, activation='tanh', return_sequences=True)(input2)
    l2_d = Dropout(0.2)(l2)
    input3 = concatenate([vec, l2_d])
    l3 = LSTM(128, activation='tanh', return_sequences=True)(input3)
    # BUG FIX: dropout was applied to `l2` here, so the third LSTM (`l3`)
    # was computed but never contributed to the rest of the graph.
    l3_d = Dropout(0.2)(l3)
    input_d = concatenate([l1_d, l2_d, l3_d])
    dense3 = TimeDistributed(Dense(num_chars))(input_d)
    output_res = Activation('softmax')(dense3)
    model = Model(inputs=vec, outputs=output_res)

model.compile(loss='categorical_crossentropy', optimizer=Adam(clipnorm=1.), metrics=['accuracy'])

# ---- Training ----
cb_sampler = CharSampler(char_vectors, model)
cb_logger = CSVLogger(model_fname + '.log')
cb_checkpoint = ModelCheckpoint("model.hdf5", monitor='val_acc', save_best_only=True, save_weights_only=False)

model.fit_generator(generate_batch(),
                    int(len(sentences_train) / batch_size) * batch_size,
                    epochs=10,
                    verbose=True,
                    validation_data=(X_test, y_test),
                    callbacks=[cb_logger, cb_sampler, cb_checkpoint])
| 2.421875 | 2 |
team_formation/social_network.py | jeffbulmer/team-formation-script | 0 | 12761330 | <gh_stars>0
import sys
class SocialNet:
    """Weighted social graph over a list of student objects.

    Students must expose getName(), getBestFriend(), getFriends() and
    getEnemies().  Edge weights: 1 = mutual best friends, 3 = friend,
    5 = neutral / one-sided best friend, 7 = enemy.  Shortest paths use
    a Dijkstra-style search.
    """

    # Class-level defaults; shadowed by instance attributes in __init__.
    _socialNetwork = {}
    _labels = []

    def __init__(self, students, balance):
        """Build the network; when *balance* is True, symmetrize each
        edge to the max of the two directed weights."""
        self._socialNetwork = self.createSocialNetwork(students)
        self._labels = self.setLabels(students)
        if(balance is True):
            self._socialNetwork = self.balanceNetwork(self._socialNetwork)

    def createSocialNetwork(self, students):
        """Return {name: row} where row[j] is the weight of the directed
        edge towards students[j] (0 on the diagonal)."""
        names = []
        network = {}
        for s in students:
            names.append(s.getName())
        for i in range(len(students)):
            b = students[i].getBestFriend()
            f = students[i].getFriends()
            e = students[i].getEnemies()
            curr_row = []
            for j in range(len(students)):
                if j == i:
                    curr_row.append(0)
                elif names[j] == b:
                    # BUG FIX: these used `is` (object identity), which only
                    # works for interned strings — equal names held in
                    # different string objects were silently treated as
                    # strangers.  Compare by value instead.
                    if(students[j].getBestFriend() == names[i]):
                        curr_row.append(1)  # mutual best friends
                    else:
                        curr_row.append(5)  # one-sided best friend
                elif names[j] in f:
                    curr_row.append(3)
                elif names[j] in e:
                    curr_row.append(7)
                else:
                    curr_row.append(5)
            network[students[i].getName()] = curr_row
        return network

    def setLabels(self, students):
        """Return the student names in input order (row/column labels)."""
        labels = []
        for s in students:
            labels.append(s.getName())
        return labels

    def balanceNetwork(self, socialNetwork):
        """Symmetrize the adjacency weights in place (elementwise max)."""
        names = self._labels
        for k in range(len(names)):
            for i in range(len(socialNetwork[names[k]])):
                if(socialNetwork[names[k]][i] != socialNetwork[names[i]][k]):
                    socialNetwork[names[k]][i] = max(socialNetwork[names[k]][i], socialNetwork[names[i]][k])
                    socialNetwork[names[i]][k] = max(socialNetwork[names[k]][i], socialNetwork[names[i]][k])
        return socialNetwork

    def check_distances(self, a):
        """Dijkstra from node *a*; returns distances in label order.

        Relies on the network dict's insertion order matching
        self._labels (both are built in student order).
        """
        distances = []
        curr = 0
        for i in range(len(self._labels)):
            if self._labels[i] == a:
                distances.append(0)
                curr = i
            else:
                distances.append(sys.maxsize)
        visited = []
        while len(visited) < len(distances):
            name = list(self._socialNetwork.keys())[curr]
            row = self._socialNetwork[name]
            for i in range(len(row)):
                if(i in visited):
                    continue
                elif(distances[curr] + row[i] < distances[i]):
                    distances[i] = distances[curr] + row[i]
            visited.append(curr)
            # Pick the closest unvisited node as the next frontier.
            min_d = sys.maxsize
            idx = curr
            for i in range(len(distances)):
                if(i in visited):
                    continue
                elif(min_d > distances[i]):
                    min_d = distances[i]
                    idx = i
            curr = idx
        return distances

    def check_distance(self, a, b):
        """Shortest-path distance from *a* to *b*."""
        target = 0
        for i in range(len(self._labels)):
            # BUG FIX: was `self._labels[i] is b` — string identity again;
            # with non-interned names the target always stayed at index 0.
            if self._labels[i] == b:
                target = i
        distances = self.check_distances(a)
        return distances[target]

    def check_diameter(self):
        """Longest shortest-path distance over all start nodes."""
        diameter = 0
        for i in self._labels:
            distance = self.check_distances(i)
            longest = max(distance)
            if(longest > diameter):
                diameter = longest
        return diameter
| 3.171875 | 3 |
torch2trt/plugins/globals.py | huliang2016/torch2trt_dynamic | 0 | 12761331 | import os
import os.path as osp
# Locate the TensorRT plugin library directory.  Preference order:
#   1) ~/space/trt_plugin/build/lib/  (if it exists)
#   2) $AMIRSTAN_LIBRARY_PATH        (if set)
#   3) the directory containing this module
dir_path = osp.join(os.path.expanduser('~'), "space/trt_plugin/build/lib/")
if not osp.exists(dir_path):
    dir_path = os.environ.get(
        "AMIRSTAN_LIBRARY_PATH",
        os.path.dirname(os.path.realpath(__file__)),
    )
wouso/games/grandchallenge/models.py | AlexandruGhergut/wouso | 117 | 12761332 | from django.db import models
from django.db.models import Q, Max
import logging
from wouso.core.config.models import IntegerSetting
from wouso.core.game.models import Game
from wouso.core.user.models import Player
from wouso.games.challenge.models import Challenge, ChallengeUser
class GrandChallengeUser(Player):
    """ Extension of the user profile for GrandChallenge """
    lost = models.IntegerField(default=0)        # losses so far (2 = eliminated)
    last_round = models.IntegerField(default=0)  # last round this user played in

    def get_challenges(self):
        """
        Return a queryset of grandchallenges for this player
        """
        return Challenge.objects.filter(id__in=GrandChallenge.objects.filter(Q(challenge__user_from__user__id=self.id)|Q(challenge__user_to__user__id=self.id)).order_by('round').values('challenge'))

    def get_active(self):
        """
        Return a list of active GrandChallenges for this user
        """
        return self.get_challenges().filter(status='A')

    def get_played(self):
        """
        Return a list of played GrandChallenges, ordered by round
        """
        return self.get_challenges().filter(status__in=('D', 'P'))

    def increase_lost(self):
        self.lost += 1
        self.save()

    def set_last_round(self, round_number):
        self.last_round = round_number
        self.save()
class GrandChallenge(models.Model):
    """One tournament match, wrapping an auto-accepted Challenge."""
    challenge = models.ForeignKey(Challenge, blank=True, null=True)
    round = models.IntegerField(blank=True, null=True)

    # NOTE(review): class-level mutable state shared by the legacy
    # in-memory flow (__oldinit__/play/joaca/clasament); the ORM flow
    # uses create() and the Round/GrandChallengeGame classes instead.
    ALL = []
    OUT_PLAY = []
    CHALLENGES = []

    def __oldinit__(self, user_from, user_to):
        # TODO: change this constructor to a classmethod
        # Legacy constructor: tracks the bracket branch in memory and opens
        # an auto-accepted Challenge between the two players.
        if not GrandChallengeGame.is_final() and not GrandChallengeGame.is_winner():
            self.branch = max(user_from.lost, user_to.lost)
        else:
            self.branch = min(user_from.lost, user_to.lost)
        self.user_from = user_from
        self.user_to = user_to
        self.__class__.ALL.append(self)
        self.won, self.lost = None, None
        self.active = True
        self.round_number = None
        challenge_user_to = user_to.user.get_profile().get_extension(ChallengeUser)
        challenge_user_from = user_from.user.get_profile().get_extension(ChallengeUser)
        chall = Challenge.create(challenge_user_from, challenge_user_to)
        chall.accept()
        self.challenge_id = chall.id
        self.__class__.CHALLENGES.append(chall.id)

    @classmethod
    def create(cls, user_from, user_to, round):
        """ Create a new Challenge and automatically accept it.
        """
        grand_challenge = cls.objects.create(round=round)
        user_from = user_from.user.get_profile()
        user_to = user_to.user.get_profile()
        grand_challenge.challenge = Challenge.create(user_from.get_extension(ChallengeUser), user_to.get_extension(ChallengeUser))
        grand_challenge.challenge.accept()
        grand_challenge.save()
        return grand_challenge

    @classmethod
    def get_challenges(cls):
        return cls.ALL

    @classmethod
    def active(cls):
        # Legacy: in-memory matches that have not been played yet.
        return filter(lambda c: c.active, cls.ALL)

    @classmethod
    def all_done(cls):
        # True when every tracked Challenge has been played ('P').
        for i in cls.CHALLENGES:
            x = Challenge.objects.get(id=i)
            if x.status != "P":
                return False
        return True

    def play(self, round_number):
        """Record the wrapped challenge's result and bump the loser's
        `lost` counter (legacy in-memory flow)."""
        winner = Challenge.objects.get(id=self.challenge_id).winner  # must be produced by the game
        if winner.user == self.user_from.user:
            self.won = self.user_from
            self.lost = self.user_to
            self.user_to.lost += 1
        else:
            self.won = self.user_to
            self.lost = self.user_from
            self.user_from.lost += 1
        self.active = False
        self.round_number = round_number

    @classmethod
    def played_with(cls, user):
        """Return the opponents *user* has already faced (legacy flow)."""
        ret = []
        for c in [c for c in cls.ALL if not c.active]:
            if c.user_from == user:
                ret.append(c.user_to)
            elif c.user_to == user:
                ret.append(c.user_from)
        return ret

    @classmethod
    def joaca(cls, round_number):
        # "play" (Romanian): resolve all active matches; two losses eliminate.
        for c in GrandChallenge.active():
            # round number...
            c.play(round_number)
            if(c.lost.lost == 2):
                cls.OUT_PLAY.append(c.lost)

    @classmethod
    def clasament(cls):
        # "standings" (Romanian): final ranking, last eliminated first.
        arb_win = GrandChallengeGame.eligible(0)
        arb_lose = GrandChallengeGame.eligible(1)
        if(len(arb_win) == 1):
            cls.OUT_PLAY.append(arb_win[0])
        if(len(arb_lose) == 1):
            cls.OUT_PLAY.append(arb_lose[0])
        results = cls.OUT_PLAY
        results.reverse()
        return results
class Round(object):
    """Lightweight view over a single tournament round (not a DB model)."""

    def __init__(self, round_number):
        self.round_number = int(round_number)

    def challenges(self):
        """
        Return a list of challenges in this round, ordered by status
        """
        return [gc.challenge for gc in GrandChallenge.objects.filter(round=self.round_number).order_by('challenge__status')]

    def info(self):
        """
        Return a dictionary with information about this round
        """
        return {}

    def participants(self):
        """Distinct GrandChallengeUsers taking part in this round."""
        ps = set([c.user_from.user for c in self.challenges()] + [c.user_to.user for c in self.challenges()])
        ps = map(lambda a: a.get_extension(GrandChallengeUser), ps)
        return ps

    def rounds(self):
        """
        Return a list of previous rounds, as an iterator
        """
        if self.round_number > 0:
            for i in range(self.round_number):
                yield Round(i + 1)

    def __repr__(self):
        # NOTE: `unicode` implies this module targets Python 2.
        return '<' + 'Round ' + unicode(self.round_number) + '>'
class GrandChallengeGame(Game):
    """Double-elimination tournament built on top of Challenge games.

    The current round number is persisted in the 'gc_round' IntegerSetting
    (0 = not started).  Players with `lost == 2` are eliminated.
    """
    ALL = []
    round_number = 0

    def __init__(self, *args, **kwargs):
        # Set parent's fields
        self._meta.get_field('verbose_name').default = "GrandChallenges"
        self._meta.get_field('short_name').default = ""
        # the url field takes as value only a named url from module's urls.py
        self._meta.get_field('url').default = "grandchallenge_index_view"
        super(GrandChallengeGame, self).__init__(*args, **kwargs)

    @classmethod
    def base_query(cls):
        # Players allowed to take part: no superusers, playable race only.
        return GrandChallengeUser.objects.exclude(user__is_superuser=True).exclude(race__can_play=False)

    @classmethod
    def is_started(cls):
        setting_round = IntegerSetting.get('gc_round')
        return setting_round.get_value() > 0

    @classmethod
    def reset(cls):
        """
        Reset a GC game, set every user lost to 0
        """
        GrandChallenge.objects.all().delete()
        GrandChallengeUser.objects.update(lost=0, last_round=0)
        cls.set_current_round(0)

    @classmethod
    def create_users(cls):
        """
        Create GrandChallengeUser extensions for all eligibile players.
        """
        for p in Player.objects.exclude(race__can_play=False):
            p.get_extension(GrandChallengeUser)

    @classmethod
    def start(cls):
        """
        Create challenges for each consecutive players. Return a list of created challenges.
        """
        cls.create_users()
        challenges = []
        round = 1
        last = None
        # Pair players two by two in query order; an odd player out gets
        # no challenge this round.
        for user in cls.base_query():
            u = user.user.get_profile()
            if last is None:
                last = u
            else:
                c = GrandChallenge.create(u, last, round)
                challenges.append(c)
                last = None
        setting_round = IntegerSetting.get('gc_round')
        setting_round.set_value(round)
        return challenges

    @classmethod
    def eligible(cls, lost_count):
        """ Return a queryset with players of lost_count
        """
        return cls.base_query().filter(lost=lost_count)

    @classmethod
    def is_final(cls):
        # Final = exactly one undefeated player vs. one one-loss player.
        arb_win = cls.eligible(0)
        arb_lose = cls.eligible(1)
        if (len(arb_win) == 1) and (len(arb_lose) == 1):
            return True
        return False

    @classmethod
    def final_round(cls):
        arb_win = cls.eligible(0)
        arb_lose = cls.eligible(1)
        # NOTE(review): this instantiates the Django model with two player
        # positional args — looks like it should use GrandChallenge.create()
        # (or the legacy __oldinit__); confirm before relying on it.
        GrandChallenge(arb_win[0], arb_lose[0])

    @classmethod
    def final_second_round(cls):
        # NOTE(review): play_round() takes (lost_count, round_number); this
        # call passes only one argument — confirm.
        GrandChallengeGame.play_round(1)

    @classmethod
    def is_winner(cls):
        # False only in the "bracket reset" state: no undefeated players
        # and exactly two one-loss players remain.
        arb_win = cls.eligible(0)
        arb_lose = cls.eligible(1)
        if (len(arb_win) == 0) and (len(arb_lose) == 2):
            return False
        return True

    @classmethod
    def is_finished(cls):
        arb_win = cls.eligible(0)
        arb_lose = cls.eligible(1)
        if len(arb_win) == 0 or (len(arb_win) == 1 and len(arb_lose) != 1):
            return True
        return False

    @classmethod
    def play_round(cls, lost_count, round_number):
        """
        Create new challenges.
        """
        if lost_count == 0:
            all = GrandChallengeGame.eligible(0)
        elif lost_count == 1:
            all = GrandChallengeGame.eligible(1)
        all = list(all)
        challenges = []
        # Greedily pair players with equal loss counts, avoiding repeat
        # opponents unless the candidate is the last player available.
        while len(all):
            u = all[0]
            played_with = GrandChallenge.played_with(u)
            adversari = [eu for eu in all if ((eu.lost == u.lost) and (eu != u) and ((eu not in played_with) or (eu == all[-1])))]
            if not len(adversari):
                break
            try:
                adversar = adversari[0]
                all.remove(adversar)
                all.remove(u)
                c = GrandChallenge.create(u, adversar, round_number)
                challenges.append(c)
            except Exception as e:
                logging.exception(e)
        return challenges

    @classmethod
    def set_current_round(cls, number):
        setting_round = IntegerSetting.get('gc_round')
        setting_round.set_value(number)

    @classmethod
    def get_current_round(cls):
        setting_round = IntegerSetting.get('gc_round')
        round = setting_round.get_value()
        if round == 0:
            return None
        return cls.get_round(round)

    @classmethod
    def get_round(cls, round):
        return Round(round_number=round)

    @classmethod
    def get_winner(cls):
        """
        Return gc winner
        """
        if cls.is_finished():
            final_gc = GrandChallenge.objects.filter(round=cls.get_current_round().round_number)[0]
            return final_gc.challenge.winner.user.get_profile()
        return None

    @classmethod
    def force_round_close(cls, round):
        """
        Finish every challenge in the round
        """
        for c in round.challenges():
            if c.is_runnable():
                c.set_expired()
            if c.is_draw():
                # Temporary hack FIXME: break draws by completion time.
                if c.user_from.seconds_took < c.user_to.seconds_took:
                    c.set_won_by_player(c.user_from.user)
                else:
                    c.set_won_by_player(c.user_to.user)
            gc_user_from = c.user_from.user.get_extension(GrandChallengeUser)
            gc_user_to = c.user_to.user.get_extension(GrandChallengeUser)
            # Upgrade lost count (guarded so a round is only counted once).
            if c.user_from.user == c.winner:
                if gc_user_to.last_round < round.round_number:
                    gc_user_to.increase_lost()
            elif c.user_to.user == c.winner:
                if gc_user_from.last_round < round.round_number:
                    gc_user_from.increase_lost()
            gc_user_from.set_last_round(round.round_number)
            gc_user_to.set_last_round(round.round_number)

    @classmethod
    def round_next(cls):
        """
        Progress to next round
        """
        if cls.is_finished():
            logging.error('Grand challenge finished.')
            return None
        round = cls.get_current_round()
        cls.force_round_close(round)
        challenges = []
        if cls.is_final():
            # Only two players left in the game
            arb_win = cls.eligible(0)
            arb_lose = cls.eligible(1)
            challenges.append(GrandChallenge.create(arb_win[0], arb_lose[0], round.round_number + 1))
        else:
            # More than two players, create new challenges
            if round.round_number % 2 == 1:
                challenges += cls.play_round(1, round.round_number + 1)
                challenges += cls.play_round(0, round.round_number + 1)
            else:
                challenges += cls.play_round(1, round.round_number + 1)
        if challenges:
            # Update round number
            round.round_number += 1
            cls.set_current_round(round.round_number)
        logging.debug('Played round %s' % round.round_number)
        # (restored: this line had dataset columns fused onto it)
        return round
googlenetbn.py | takaaki82/GoogleNetBN_Chainer | 0 | 12761333 | <gh_stars>0
import chainer
import chainer.links as L
import chainer.functions as F
import numpy as np
from chainer.initializers import constant, uniform
class GoogleNetBN(chainer.Chain):
    """
    GoogleNet of BatchNormalization version

    Trains with two auxiliary softmax classifiers (loss1, loss2) in
    addition to the main head (loss3); `predict` uses only the main path.
    """

    def __init__(self, n_class=None, pretrained_model=None, mean=None, initialW=None, initialBias=None):
        self.n_class = n_class
        self.mean = mean
        self.initialbias = initialBias  # NOTE(review): stored but never used below — confirm
        self.insize = 224               # expected input image side length
        if n_class is None:
            self.n_class = 1000
        if mean is None:
            # imagenet means
            self.mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)[:, np.newaxis, np.newaxis]
        if initialW is None:
            # employ default initializers used in BVLC. For more detail, see
            self.initialW = uniform.LeCunUniform(scale=1.0)
        if pretrained_model is None:
            # As a sampling process is time-consuming
            # we employ a zero initializer for faster computation
            # NOTE(review): this zeroes initialW when *no* pretrained model is
            # given; the comment suggests the condition may be inverted (zero
            # init is normally used when weights will be loaded) — confirm.
            self.initialW = constant.Zero()
        super(GoogleNetBN, self).__init__()
        with self.init_scope():
            # Deep layers: GoogleNet of BatchNormalization version
            self.conv1 = L.Convolution2D(None, 64, 7, stride=2, pad=3, nobias=True)
            self.norm1 = L.BatchNormalization(64)
            self.conv2 = L.Convolution2D(None, 192, 3, stride=1, pad=1, nobias=True)
            self.norm2 = L.BatchNormalization(192)
            self.inc3a = L.InceptionBN(None, 64, 64, 64, 64, 96, "avg", 32)
            self.inc3b = L.InceptionBN(None, 64, 64, 96, 64, 96, "avg", 64)
            self.inc3c = L.InceptionBN(None, 0, 128, 160, 64, 96, "max", stride=2)
            self.inc4a = L.InceptionBN(None, 224, 64, 96, 96, 128, "avg", 128)
            self.inc4b = L.InceptionBN(None, 192, 96, 128, 96, 128, "avg", 128)
            self.inc4c = L.InceptionBN(None, 160, 128, 160, 128, 160, "avg", 128)
            self.inc4d = L.InceptionBN(None, 96, 128, 192, 160, 192, "avg", 128)
            self.inc4e = L.InceptionBN(None, 0, 128, 192, 192, 256, "max", stride=2)
            self.inc5a = L.InceptionBN(None, 352, 192, 320, 160, 224, "avg", 128)
            self.inc5b = L.InceptionBN(None, 352, 192, 320, 192, 224, "max", 128)
            # Main classifier head.
            self.loss3_fc = L.Linear(None, self.n_class, initialW=self.initialW)
            # First auxiliary classifier (branches off after inc4a).
            self.loss1_conv = L.Convolution2D(None, 128, 1, initialW=self.initialW, nobias=True)
            self.norma = L.BatchNormalization(128)
            self.loss1_fc1 = L.Linear(None, 1024, initialW=self.initialW, nobias=True)
            self.norma2 = L.BatchNormalization(1024)
            self.loss1_fc2 = L.Linear(None, self.n_class, initialW=self.initialW)
            # Second auxiliary classifier (branches off after inc4d).
            self.loss2_conv = L.Convolution2D(None, 128, 1, initialW=self.initialW, nobias=True)
            self.normb = L.BatchNormalization(128)
            self.loss2_fc1 = L.Linear(None, 1024, initialW=self.initialW, nobias=True)
            self.normb2 = L.BatchNormalization(1024)
            self.loss2_fc2 = L.Linear(None, self.n_class, initialW=self.initialW)

    def __call__(self, x, t):
        """Forward pass for training: returns the combined loss
        0.3*(loss1 + loss2) + loss3 and reports metrics via chainer.report."""
        h = F.max_pooling_2d(F.relu(self.norm1(self.conv1(x))), 3, stride=2, pad=1)
        h = F.max_pooling_2d(F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)

        # First auxiliary loss branch.
        a = F.average_pooling_2d(h, 5, stride=3)
        a = F.relu(self.norma(self.loss1_conv(a)))
        a = F.relu(self.norma2(self.loss1_fc1(a)))
        a = self.loss1_fc2(a)
        loss1 = F.softmax_cross_entropy(a, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        # Second auxiliary loss branch.
        b = F.average_pooling_2d(h, 5, stride=3)
        b = F.relu(self.normb(self.loss2_conv(b)))
        b = F.relu(self.normb2(self.loss2_fc1(b)))
        b = self.loss2_fc2(b)
        loss2 = F.softmax_cross_entropy(b, t)

        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.loss3_fc(h)
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            "loss": loss,
            "loss1": loss1,
            "loss2": loss2,
            "loss3": loss3,
            "accuracy": accuracy,
        }, self)
        return loss

    def predict(self, x):
        """Inference forward pass (main path only); returns class softmax."""
        h = F.max_pooling_2d(F.relu(self.norm1(self.conv1(x))), 3, stride=2, pad=1)
        h = F.max_pooling_2d(F.relu(self.norm2(self.conv2(h))), 3, stride=2, pad=1)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = self.inc3c(h)
        h = self.inc4a(h)
        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)
        h = self.inc4e(h)
        h = self.inc5a(h)
        h = F.average_pooling_2d(self.inc5b(h), 7)
        h = self.loss3_fc(h)
        return F.softmax(h)
| 2.609375 | 3 |
Q41_2_findContinuousSequence.py | FreesiaLikesPomelo/-offer | 0 | 12761334 | <filename>Q41_2_findContinuousSequence.py
'''
面试题57 - II. 和为s的连续正数序列
输入一个正整数 target ,输出所有和为 target 的连续正整数序列(至少含有两个数)。
序列内的数字由小到大排列,不同序列按照首个数字从小到大排列。
示例 1:
输入:target = 9
输出:[[2,3,4],[4,5]]
示例 2:
输入:target = 15
输出:[[1,2,3,4,5],[4,5,6],[7,8]]
限制:
1 <= target <= 10^5
'''
# test cases:
# 1. target<3: return []
# 2. no qualified result: input 4, 8...
# 3. normal test: might contain more than one qualified list
# 执行用时 :204 ms, 在所有 Python3 提交中击败了52.83%的用户
# 内存消耗 :13.7 MB, 在所有 Python3 提交中击败了100.00%的用户
class Solution:
    def findContinuousSequence(self, target: int) -> List[List[int]]:
        """Return all runs of >= 2 consecutive positive integers that sum to
        target, ordered by their first element (sliding-window, O(target))."""
        if target < 3:
            # 1 + 2 = 3 is the smallest achievable sum of two consecutive ints.
            return []
        if target == 3:
            return [[1, 2]]
        lo, hi = 1, 2                   # current window is [lo, hi]
        window_sum = lo + hi
        sequences = []
        half = (target + 1) // 2        # lo can never reach the midpoint
        while lo < half and hi > lo:
            if window_sum == target:
                sequences.append(list(range(lo, hi + 1)))
                # Shrink from the left to look for the next window.
                window_sum -= lo
                lo += 1
            elif window_sum < target:
                hi += 1
                window_sum += hi
            else:
                window_sum -= lo
                lo += 1
        return sequences
'''
# 执行用时 :284 ms, 在所有 Python3 提交中击败了40.24%的用户
# 内存消耗 :13.6 MB, 在所有 Python3 提交中击败了100.00%的用户
class Solution:
def findContinuousSequence(self, target: int) -> List[List[int]]:
if target<3:
return []
elif target==3:
return [[1,2]]
else:
small = 1
big = 2
result = []
while small<int((target+1)/2) and big>small: # 6:3, 9:5
Sn = (small+big)*(big-small+1)/2
if Sn==target:
temp = list(range(small,big+1))
result.append(temp)
small+=1
elif Sn<target:
big+=1
else: #Sn>target
small+=1
return result
'''
| 3.421875 | 3 |
src/restaff/helpers/clean_uper.py | ko10ok/scorator | 0 | 12761335 | <filename>src/restaff/helpers/clean_uper.py<gh_stars>0
import logging
import os
def cleanup_temp_files(temporary_files):
    """Remove each temporary file in *temporary_files* from disk.

    Args:
        temporary_files: Iterable of filesystem paths to delete.

    Raises:
        OSError: If a file cannot be removed (e.g. it no longer exists).
    """
    # BUGFIX: the original referenced an undefined name `logger`, raising a
    # NameError on the first iteration; obtain a module-style logger here.
    logger = logging.getLogger(__name__)
    for file_path in temporary_files:
        # Lazy %-formatting so the message is only built if DEBUG is enabled.
        logger.debug('removing temporary %s', file_path)
        os.remove(file_path)
| 2.171875 | 2 |
intersight_universal_api_calls.py | ugo-emekauwa/intersight-universal-api-calls | 1 | 12761336 | """
Cisco Intersight Universal API Calls Module, v1.1
Author: <NAME>
Contact: <EMAIL>, <EMAIL>
Summary: The Cisco Intersight Universal API Calls module provides
a set of functions that simplify creation, retrieval,
modification, and deletion of resources on Cisco Intersight.
"""
# Import needed Python modules
import sys
import json
import requests
import os
import intersight
from intersight.intersight_api_client import IntersightApiClient
# MODULE REQUIREMENT 1
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example: key_id = "<KEY>"
"""
key_id = ""
# MODULE REQUIREMENT 2
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt" file.
Here is an example: key = "C:\Keys\Key1\SecretKey.txt"
"""
key = ""
# Define Intersight SDK IntersightApiClient variables
# Tested on Cisco Intersight API Reference v1.0.9-853
base_url = "https://intersight.com/api/v1"
api_instance = IntersightApiClient(host=base_url,private_key=key,api_key_id=key_id)
# Establish Intersight Universal Functions
def iu_get(api_path):
    """This is a function to perform a universal or generic GET on objects under available Intersight API types,
    including those not yet defined in the Intersight SDK for Python. An argument for the API type path is required.

    Args:
        api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
        adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
        API reference library at https://intersight.com/apidocs/introduction/overview/.

    Returns:
        A dictionary containing all objects of the specified API type. If the API type is inaccessible, an
        implicit value of None will be returned.
    """
    full_resource_path = "/" + api_path
    try:
        api_instance.call_api(full_resource_path, "GET")
        response = api_instance.last_response.data
        results = json.loads(response)
        print("The API resource path '" + api_path + "' has been accessed successfully.\n")
        return results
    # BUGFIX: was a bare `except:` that silently swallowed every error
    # (including KeyboardInterrupt). Catch Exception and print the cause,
    # consistent with the other iu_* helpers in this module.
    except Exception as exception_message:
        print("Unable to access the API resource path '" + api_path + "'.\n")
        print(exception_message)
def iu_get_moid(api_path, moid):
    """This is a function to perform a universal or generic GET on a specified object under available
    Intersight API types, including those not yet defined in the Intersight SDK for Python. An argument for the
    API type path and MOID (managed object identifier) is required.

    Args:
        api_path: The path to the targeted Intersight API type. For example, to specify the Intersight API type for
        adapter configuration policies, enter "adapter/ConfigPolicies". More API types can be found in the Intersight
        API reference library at https://intersight.com/apidocs/introduction/overview/.
        moid: The managed object ID of the targeted API object.

    Returns:
        A dictionary containing all parameters of the specified API object. If the API object is inaccessible, an
        implicit value of None will be returned.
    """
    full_resource_path = "/" + api_path + "/" + moid
    try:
        api_instance.call_api(full_resource_path, "GET")
        response = api_instance.last_response.data
        results = json.loads(response)
        print("The object located at the resource path '" + full_resource_path + "' has been accessed succesfully.\n")
        return results
    # BUGFIX: was a bare `except:` that silently swallowed every error
    # (including KeyboardInterrupt). Catch Exception and print the cause,
    # consistent with the other iu_* helpers in this module.
    except Exception as exception_message:
        print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
        print(exception_message)
def iu_delete_moid(api_path, moid):
    """Perform a universal or generic DELETE on a specified object under any
    available Intersight API type, including types not yet defined in the
    Intersight SDK for Python.

    Args:
        api_path: The path to the targeted Intersight API type, e.g.
            "adapter/ConfigPolicies". See the Intersight API reference at
            https://intersight.com/apidocs/introduction/overview/.
        moid: The managed object ID of the targeted API object.

    Returns:
        A statement indicating whether the DELETE method was successful or failed.

    Raises:
        Exception: Any error raised while performing the API call is printed
            before the failure statement is returned.
    """
    full_resource_path = "/" + api_path + "/" + moid
    try:
        api_instance.call_api(full_resource_path, "DELETE")
    except Exception as delete_error:
        print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
        print(delete_error)
        return "The DELETE method failed."
    print("The deletion of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
    return "The DELETE method was successful."
def iu_post(api_path, body):
    """Perform a universal or generic POST of an object under any available
    Intersight API type, including types not yet defined in the Intersight
    SDK for Python.

    Args:
        api_path: The path to the targeted Intersight API type, e.g.
            "adapter/ConfigPolicies". See the Intersight API reference at
            https://intersight.com/apidocs/introduction/overview/.
        body: The content to be created under the targeted API type,
            provided as a dictionary.

    Returns:
        A statement indicating whether the POST method was successful or failed.

    Raises:
        Exception: Any error raised while performing the API call is printed
            before the failure statement is returned.
    """
    full_resource_path = "/" + api_path
    try:
        api_instance.call_api(full_resource_path, "POST", body=body)
    except Exception as post_error:
        print("Unable to create the object under the resource path '" + full_resource_path + "'.\n")
        print(post_error)
        return "The POST method failed."
    print("The creation of the object under the resource path '" + full_resource_path + "' has been completed.\n")
    return "The POST method was successful."
def iu_post_moid(api_path, moid, body):
    """Perform a universal or generic POST on a specified object under any
    available Intersight API type, including types not yet defined in the
    Intersight SDK for Python.

    Args:
        api_path: The path to the targeted Intersight API type, e.g.
            "adapter/ConfigPolicies". See the Intersight API reference at
            https://intersight.com/apidocs/introduction/overview/.
        moid: The managed object ID of the targeted API object.
        body: The content to be modified on the targeted API object,
            provided as a dictionary.

    Returns:
        A statement indicating whether the POST method was successful or failed.

    Raises:
        Exception: Any error raised while performing the API call is printed
            before the failure statement is returned.
    """
    full_resource_path = "/" + api_path + "/" + moid
    try:
        api_instance.call_api(full_resource_path, "POST", body=body)
    except Exception as post_error:
        print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
        print(post_error)
        return "The POST method failed."
    print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
    return "The POST method was successful."
def iu_patch_moid(api_path, moid, body):
    """Perform a universal or generic PATCH on a specified object under any
    available Intersight API type, including types not yet defined in the
    Intersight SDK for Python.

    Args:
        api_path: The path to the targeted Intersight API type, e.g.
            "adapter/ConfigPolicies". See the Intersight API reference at
            https://intersight.com/apidocs/introduction/overview/.
        moid: The managed object ID of the targeted API object.
        body: The content to be modified on the targeted API object,
            provided as a dictionary.

    Returns:
        A statement indicating whether the PATCH method was successful or failed.

    Raises:
        Exception: Any error raised while performing the API call is printed
            before the failure statement is returned.
    """
    full_resource_path = "/" + api_path + "/" + moid
    try:
        api_instance.call_api(full_resource_path, "PATCH", body=body)
    except Exception as patch_error:
        print("Unable to access the object located at the resource path '" + full_resource_path + "'.\n")
        print(patch_error)
        return "The PATCH method failed."
    print("The update of the object located at the resource path '" + full_resource_path + "' has been completed.\n")
    return "The PATCH method was successful."
# Verify API key variables have been set.
# BUGFIX: the original tested `len(...) is 0`, relying on CPython small-int
# identity caching; use `== 0` (value equality), which is the correct and
# portable comparison (and avoids a SyntaxWarning on Python 3.8+).
key_id_setting = key_id.strip()
if key_id_setting is None or len(key_id_setting) == 0 or "/" not in key_id_setting:
    print("\nThe key_id variable for the intersight_universal_api_calls module has not been set correctly!")
    print("Please edit the intersight_universal_api_calls.py file and set the key_id variable \nwith the ID of your API key in order for the module to work properly.")
key_setting = key.strip()
if key_setting is None or len(key_setting) == 0 or not os.path.isfile(key_setting):
    print("\nThe key variable for the intersight_universal_api_calls module has not been set correctly!")
    print("Please edit the intersight_universal_api_calls.py file and set the key variable \nwith your system's path to your API key SecretKey.txt file in order for the module to work properly.")
| 2.53125 | 3 |
auto_tag/exception.py | mateimicu/auto-tag | 6 | 12761337 | <reponame>mateimicu/auto-tag<filename>auto_tag/exception.py
#!/usr/bin/env python3
"""
Exception used in the AutoTag project
"""
class BaseAutoTagException(Exception):
    """Base exception for the AutoTag project."""


class DetectorValidationException(BaseAutoTagException):
    """Validation failed on a detector"""


class DetectorNotFound(BaseAutoTagException):
    """Raised when a requested detector cannot be found."""


class ConfigurationError(BaseAutoTagException):
    """Raised when the supplied configuration is invalid."""


class CantFindBranch(BaseAutoTagException):
    """Can't find a specific branch"""


class UnknowkSearchStrategy(BaseAutoTagException):
    """Invalid search strategy."""
    # NOTE(review): the class name contains a typo ("Unknowk"); it is kept
    # as-is for backward compatibility with existing callers.
| 2.234375 | 2 |
tinkoff_voicekit_client/speech_utils/user_utils.py | nikolai-semenov/voicekit_client_python | 19 | 12761338 |
def get_x_request_id(metadata):
    """
    Return the first x-request-id value found in response metadata.

    :param metadata: response metadata from one of the VoiceKit methods,
        an iterable of (key, value) pairs
    :return: the x-request-id value, or None if no such key is present
    """
    for entry in metadata:
        if entry[0] == 'x-request-id':
            return entry[1]
    return None
| 2.609375 | 3 |
pyglm/inference/gibbs.py | slinderman/theano_pyglm | 37 | 12761339 | """ Fit a Network GLM with MAP estimation. For some models, the log posterior
is concave and has a unique maximum.
"""
import copy
from scipy.misc import logsumexp
from scipy.integrate import cumtrapz
from pyglm.utils.theano_func_wrapper import seval, _flatten
from pyglm.utils.packvec import *
from pyglm.utils.grads import *
from hips.inference.ars import adaptive_rejection_sample
from hips.inference.hmc import hmc
from pyglm.inference.log_sum_exp import log_sum_exp_sample
from pyglm.inference.coord_descent import coord_descent
class MetropolisHastingsUpdate(object):
    """
    Base class for Metropolis-Hastings updates. Each concrete update targets
    a specific model component and supplies its own configuration. Typical
    updates include:
        - Gibbs updates (sample from a conditional distribution)
        - Hamiltonian Monte Carlo (gradient-informed moves for unconstrained
          continuous variables)
        - Slice sampling (good for correlated multivariate Gaussians)
    """
    def __init__(self):
        # Concrete subclasses populate this with the components they update.
        self._target_components = []

    @property
    def target_components(self):
        """List of model components this update applies to."""
        return self._target_components

    @property
    def target_variables(self):
        """List of variables this update applies to (none by default)."""
        return []

    def preprocess(self, population):
        """Hook for any required preprocessing; a no-op in the base class."""
        pass

    def update(self, x_curr):
        """Take an MH step; the base implementation returns the state unchanged."""
        return x_curr
class ParallelMetropolisHastingsUpdate(MetropolisHastingsUpdate):
    """ Extending this class indicates that the updates can be
        performed in parallel over n, the index of the neuron.
    """
    # NOTE: subclasses deliberately extend the base update(x_curr) signature
    # with the neuron index n; callers dispatch on the subclass type.
    def update(self, x_curr, n):
        r""" Take a MH step for the n-th neuron. This can be performed in
            parallel over other n' \in [N]. Abstract: subclasses override.
        """
        pass
# class HmcGlmUpdate(ParallelMetropolisHastingsUpdate):
# """
# Update the continuous and unconstrained GLM parameters using Hamiltonian
# Monte Carlo. Stochastically follow the gradient of the parameters using
# Hamiltonian dynamics.
# """
# def __init__(self):
# super(HmcGlmUpdate, self).__init__()
#
# self.avg_accept_rate = 0.9
# self.step_sz = 0.05
#
# def preprocess(self, population):
# """ Initialize functions that compute the gradient and Hessian of
# the log probability with respect to the differentiable GLM
# parameters, e.g. the weight matrix if it exists.
# """
# self.population = population
# self.glm = population.glm
# self.syms = population.get_variables()
# self.glm_syms = differentiable(self.syms['glm'])
#
# # Compute gradients of the log prob wrt the GLM parameters
# self.glm_logp = self.glm.log_p
# self.g_glm_logp_wrt_glm, _ = grad_wrt_list(self.glm_logp,
# _flatten(self.glm_syms))
#
# # Get the shape of the parameters from a sample of variables
# self.glm_shapes = get_shapes(self.population.extract_vars(self.population.sample(),0)['glm'],
# self.glm_syms)
#
# def _glm_logp(self, x_vec, x_all):
# """
# Compute the log probability (or gradients and Hessians thereof)
# of the given GLM variables. We also need the rest of the population variables,
# i.e. those that are not being sampled currently, in order to evaluate the log
# probability.
# """
# # Extract the glm parameters
# x_glm = unpackdict(x_vec, self.glm_shapes)
# set_vars(self.glm_syms, x_all['glm'], x_glm)
# lp = seval(self.glm_logp,
# self.syms,
# x_all)
# return lp
#
# def _grad_glm_logp(self, x_vec, x_all):
# """
# Compute the negative log probability (or gradients and Hessians thereof)
# of the given GLM variables. We also need the rest of the population variables,
# i.e. those that are not being sampled currently, in order to evaluate the log
# probability.
# """
# # Extract the glm parameters
# x_glm = unpackdict(x_vec, self.glm_shapes)
# set_vars(self.glm_syms, x_all['glm'], x_glm)
# glp = seval(self.g_glm_logp_wrt_glm,
# self.syms,
# x_all)
# return glp
#
# def update(self, x, n):
# """ Gibbs sample the GLM parameters. These are mostly differentiable
# so we use HMC wherever possible.
# """
#
# xn = self.population.extract_vars(x, n)
#
# # Get the differentiable variables suitable for HMC
# dxn = get_vars(self.glm_syms, xn['glm'])
# x_glm_0, shapes = packdict(dxn)
#
# # Create lambda functions to compute the nll and its gradient
# nll = lambda x_glm_vec: -1.0*self._glm_logp(x_glm_vec, xn)
# grad_nll = lambda x_glm_vec: -1.0*self._grad_glm_logp(x_glm_vec, xn)
#
# # HMC with automatic parameter tuning
# n_steps = 2
# x_glm, new_step_sz, new_accept_rate = hmc(nll,
# grad_nll,
# self.step_sz,
# n_steps,
# x_glm_0,
# adaptive_step_sz=True,
# avg_accept_rate=self.avg_accept_rate)
#
# # Update step size and accept rate
# self.step_sz = new_step_sz
# self.avg_accept_rate = new_accept_rate
# # print "GLM step sz: %.3f\tGLM_accept rate: %.3f" % (new_step_sz, new_accept_rate)
#
#
# # Unpack the optimized parameters back into the state dict
# x_glm_n = unpackdict(x_glm, shapes)
# set_vars(self.glm_syms, xn['glm'], x_glm_n)
#
#
# x['glms'][n] = xn['glm']
# return x
class HmcBiasUpdate(ParallelMetropolisHastingsUpdate):
    """
    Update the continuous and unconstrained bias parameters using Hamiltonian
    Monte Carlo. Stochastically follow the gradient of the parameters using
    Hamiltonian dynamics.
    """
    def __init__(self):
        super(HmcBiasUpdate, self).__init__()
        # HMC tuning state: number of leapfrog steps, running average
        # acceptance rate, and the adaptively tuned step size.
        self.n_steps = 10
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        """ Compile the symbolic expressions needed during sampling: the
            gradients of the bias prior and of the GLM likelihood with
            respect to the differentiable bias parameters. Called once
            before sampling begins.
        """
        self.population = population
        self.glm = population.glm
        self.bias_model = self.glm.bias_model
        self.syms = population.get_variables()
        self.bias_syms = differentiable(self.syms['glm']['bias'])
        # Compute gradients of the log prob wrt the GLM parameters
        self.glm_logp = self.glm.log_p
        # self.g_glm_logp_wrt_bias, _ = grad_wrt_list(self.glm_logp,
        #                                             _flatten(self.bias_syms))
        # Prior and likelihood gradients are built separately so the
        # likelihood term can be re-evaluated per data sequence.
        self.g_glm_ll_wrt_bias, _ = grad_wrt_list(self.glm.ll,
                                                  _flatten(self.bias_syms))
        self.g_bias_logp_wrt_bias, _ = grad_wrt_list(self.bias_model.log_p,
                                                     _flatten(self.bias_syms))
        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(self.population.extract_vars(self.population.sample(),0)['glm']['bias'],
                                     self.bias_syms)

    def _precompute_vars(self, x, n):
        """ Precompute the stimulus-induced and network-induced currents for
            neuron n. These do not depend on the bias, so they are computed
            once and held fixed throughout the HMC trajectory.
        """
        nvars = self.population.extract_vars(x, n)
        I_stim = seval(self.glm.bkgd_model.I_stim,
                       self.syms,
                       nvars)
        I_net = seval(self.glm.I_net,
                      self.syms,
                      nvars)
        return I_stim, I_net

    def _glm_logp(self, x_vec, x_all, I_stim, I_net):
        """ Compute the log probability of the bias in x_vec: the bias prior
            plus the GLM log likelihood accumulated over all data sequences.
            x_all carries the rest of the population variables (those not
            currently being sampled), which are needed for the evaluation.
        """
        # Extract the glm parameters
        x_bias = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.bias_syms, x_all['glm']['bias'], x_bias)
        lp = seval(self.bias_model.log_p,
                   self.syms,
                   x_all)
        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm.ll,
                        {'I_stim' : self.glm.bkgd_model.I_stim,
                         'I_net' : self.glm.I_net,
                         'bias' : self.bias_model.bias,
                         'n' : self.glm.n
                        },
                        {'I_stim' : I_stim,
                         'I_net' : I_net,
                         'bias' : x_vec,
                         'n' : x_all['glm']['n']
                        }
                        )
        return lp

    def _grad_glm_logp(self, x_vec, x_all, I_stim, I_net):
        """ Compute the gradient of the log probability with respect to the
            bias: the prior gradient plus the likelihood gradient accumulated
            over all data sequences. (The caller negates this to obtain the
            gradient of the negative log probability for HMC.)
        """
        # Extract the glm parameters
        x_bias = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.bias_syms, x_all['glm']['bias'], x_bias)
        # glp = seval(self.g_glm_logp_wrt_bias,
        #             self.syms,
        #             x_all)
        #
        glp = seval(self.g_bias_logp_wrt_bias,
                    self.syms,
                    x_all)
        # Accumulate the likelihood gradient for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            glp += seval(self.g_glm_ll_wrt_bias,
                         {'I_stim' : self.glm.bkgd_model.I_stim,
                          'I_net' : self.glm.I_net,
                          'bias' : self.bias_model.bias,
                          'n' : self.glm.n
                         },
                         {'I_stim' : I_stim,
                          'I_net' : I_net,
                          'bias' : x_vec,
                          'n' : x_all['glm']['n']
                         }
                         )
        return glp

    def update(self, x, n):
        """ Draw a new bias for neuron n with HMC, holding the other
            variables fixed. Mutates and returns the state dict x.
        """
        xn = self.population.extract_vars(x, n)
        # # Get the differentiable variables suitable for HMC
        # dxn = get_vars(self.bias_syms, xn['glm']['bias'])
        # x_glm_0, shapes = packdict(dxn)
        I_stim, I_net = self._precompute_vars(x, n)
        x_bias_0 = xn['glm']['bias']['bias']
        # Create lambda functions to compute the nll and its gradient
        nll = lambda x_glm_vec: -1.0 * self._glm_logp(x_glm_vec, xn, I_stim, I_net)
        grad_nll = lambda x_glm_vec: -1.0 * self._grad_glm_logp(x_glm_vec, xn, I_stim, I_net)
        # HMC with automatic parameter tuning
        x_bias, new_step_sz, new_accept_rate = hmc(nll,
                                                   grad_nll,
                                                   self.step_sz,
                                                   self.n_steps,
                                                   x_bias_0,
                                                   adaptive_step_sz=True,
                                                   avg_accept_rate=self.avg_accept_rate)
        # Update step size and accept rate
        self.step_sz = new_step_sz
        self.avg_accept_rate = new_accept_rate
        # print "GLM step sz: %.3f\tGLM_accept rate: %.3f" % (new_step_sz, new_accept_rate)
        xn['glm']['bias']['bias'] = x_bias
        x['glms'][n] = xn['glm']
        return x
class HmcBkgdUpdate(ParallelMetropolisHastingsUpdate):
    """
    Update the continuous and unconstrained background (bkgd) parameters
    using Hamiltonian Monte Carlo. Stochastically follow the gradient of the
    parameters using Hamiltonian dynamics.
    """
    def __init__(self):
        super(HmcBkgdUpdate, self).__init__()
        # HMC tuning state: number of leapfrog steps, running average
        # acceptance rate, and the adaptively tuned step size.
        self.n_steps = 2
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        """ Compile the symbolic expressions needed during sampling: the
            gradients of the log prior and the log likelihood with respect
            to the differentiable background parameters.
        """
        self.population = population
        self.glm = population.glm
        self.syms = population.get_variables()
        self.bkgd_syms = differentiable(self.syms['glm']['bkgd'])
        # Prior and likelihood gradients are built separately so the
        # likelihood term can be re-evaluated per data sequence.
        self.glm_logprior = self.glm.log_prior
        self.g_glm_logprior_wrt_bkgd, _ = grad_wrt_list(self.glm_logprior,
                                                        _flatten(self.bkgd_syms))
        self.glm_ll = self.glm.ll
        self.g_glm_ll_wrt_bkgd, _ = grad_wrt_list(self.glm_ll,
                                                  _flatten(self.bkgd_syms))
        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(self.population.extract_vars(self.population.sample(),0)['glm']['bkgd'],
                                     self.bkgd_syms)

    def _glm_logp(self, x_vec, x_all):
        """ Compute the log probability of the background parameters in
            x_vec: the prior plus the log likelihood accumulated over all
            data sequences. x_all carries the remaining population variables
            needed for the evaluation.
        """
        # Extract the glm parameters
        x_bkgd = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.bkgd_syms, x_all['glm']['bkgd'], x_bkgd)
        lp = seval(self.glm_logprior,
                   self.syms,
                   x_all)
        # Accumulate the log likelihood over each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm_ll,
                        self.syms,
                        x_all)
        return lp

    def _grad_glm_logp(self, x_vec, x_all):
        """ Compute the gradient of the log probability with respect to the
            background parameters: the prior gradient plus the likelihood
            gradient accumulated over all data sequences.
        """
        # Extract the glm parameters
        x_bkgd = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.bkgd_syms, x_all['glm']['bkgd'], x_bkgd)
        glp = seval(self.g_glm_logprior_wrt_bkgd,
                    self.syms,
                    x_all)
        # BUGFIX: this loop previously used `glp = seval(...)`, which
        # overwrote the prior gradient and kept only the last data
        # sequence's likelihood gradient. Accumulate with `+=`, matching
        # the accumulation in _glm_logp and in HmcBiasUpdate.
        for data in self.population.data_sequences:
            self.population.set_data(data)
            glp += seval(self.g_glm_ll_wrt_bkgd,
                         self.syms,
                         x_all)
        return glp

    def update(self, x, n):
        """ Draw new background parameters for neuron n with HMC, holding
            the other variables fixed. Mutates and returns the state dict x.
        """
        xn = self.population.extract_vars(x, n)
        # Get the differentiable variables suitable for HMC
        dxn = get_vars(self.bkgd_syms, xn['glm']['bkgd'])
        x_glm_0, shapes = packdict(dxn)
        # Return if nothing to do
        if len(dxn) == 0:
            return x
        # Create lambda functions to compute the nll and its gradient
        nll = lambda x_glm_vec: -1.0*self._glm_logp(x_glm_vec, xn)
        grad_nll = lambda x_glm_vec: -1.0*self._grad_glm_logp(x_glm_vec, xn)
        # HMC with automatic parameter tuning
        x_bkgd, new_step_sz, new_accept_rate = hmc(nll,
                                                   grad_nll,
                                                   self.step_sz,
                                                   self.n_steps,
                                                   x_glm_0,
                                                   adaptive_step_sz=True,
                                                   avg_accept_rate=self.avg_accept_rate)
        # Update step size and accept rate
        self.step_sz = new_step_sz
        self.avg_accept_rate = new_accept_rate
        # Unpack the optimized parameters back into the state dict
        x_bkgd_n = unpackdict(x_bkgd, shapes)
        set_vars(self.bkgd_syms, xn['glm']['bkgd'], x_bkgd_n)
        x['glms'][n] = xn['glm']
        return x
class HmcImpulseUpdate(ParallelMetropolisHastingsUpdate):
    """
    Update the continuous and unconstrained impulse response parameters using
    Hamiltonian Monte Carlo. Stochastically follow the gradient of the
    parameters using Hamiltonian dynamics.
    """
    def __init__(self):
        super(HmcImpulseUpdate, self).__init__()
        # HMC tuning state: running average acceptance rate and the
        # adaptively tuned step size (the leapfrog step count is fixed
        # locally in update()).
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        """ Compile the symbolic expressions needed during sampling: the
            gradients of the log prior and the log likelihood with respect
            to the differentiable impulse response parameters.
        """
        self.population = population
        self.glm = population.glm
        self.syms = population.get_variables()
        self.impulse_syms = differentiable(self.syms['glm']['imp'])
        # Prior and likelihood gradients are built separately so the
        # likelihood term can be re-evaluated per data sequence.
        self.glm_logprior = self.glm.log_prior
        self.g_glm_logprior_wrt_imp, _ = grad_wrt_list(self.glm_logprior,
                                                       _flatten(self.impulse_syms))
        self.glm_ll = self.glm.ll
        self.g_glm_ll_wrt_imp, _ = grad_wrt_list(self.glm_ll,
                                                 _flatten(self.impulse_syms))
        # Get the shape of the parameters from a sample of variables
        self.glm_shapes = get_shapes(self.population.extract_vars(self.population.sample(),0)['glm']['imp'],
                                     self.impulse_syms)

    def _glm_logp(self, x_vec, x_all):
        """ Compute the log probability of the impulse parameters in x_vec:
            the prior plus the log likelihood accumulated over all data
            sequences. x_all carries the remaining population variables
            needed for the evaluation.
        """
        # Extract the glm parameters
        x_imp = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.impulse_syms, x_all['glm']['imp'], x_imp)
        lp = seval(self.glm_logprior,
                   self.syms,
                   x_all)
        # Accumulate the log likelihood over each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm_ll,
                        self.syms,
                        x_all)
        return lp

    def _grad_glm_logp(self, x_vec, x_all):
        """ Compute the gradient of the log probability with respect to the
            impulse parameters: the prior gradient plus the likelihood
            gradient accumulated over all data sequences.
        """
        # Extract the glm parameters
        x_imp = unpackdict(x_vec, self.glm_shapes)
        set_vars(self.impulse_syms, x_all['glm']['imp'], x_imp)
        glp = seval(self.g_glm_logprior_wrt_imp,
                    self.syms,
                    x_all)
        # BUGFIX: this loop previously used `glp = seval(...)`, which
        # overwrote the prior gradient and kept only the last data
        # sequence's likelihood gradient. Accumulate with `+=`, matching
        # the accumulation in _glm_logp and in HmcBiasUpdate.
        for data in self.population.data_sequences:
            self.population.set_data(data)
            glp += seval(self.g_glm_ll_wrt_imp,
                         self.syms,
                         x_all)
        return glp

    def update(self, x, n):
        """ Draw new impulse parameters for neuron n with HMC, holding the
            other variables fixed. Mutates and returns the state dict x.
        """
        xn = self.population.extract_vars(x, n)
        # Get the differentiable variables suitable for HMC
        dxn = get_vars(self.impulse_syms, xn['glm']['imp'])
        x_glm_0, shapes = packdict(dxn)
        # Return early if there is nothing to sample (robustness, consistent
        # with HmcBkgdUpdate.update)
        if len(dxn) == 0:
            return x
        # Create lambda functions to compute the nll and its gradient
        nll = lambda x_glm_vec: -1.0*self._glm_logp(x_glm_vec, xn)
        grad_nll = lambda x_glm_vec: -1.0*self._grad_glm_logp(x_glm_vec, xn)
        # HMC with automatic parameter tuning
        n_steps = 2
        x_imp, new_step_sz, new_accept_rate = hmc(nll,
                                                  grad_nll,
                                                  self.step_sz,
                                                  n_steps,
                                                  x_glm_0,
                                                  adaptive_step_sz=True,
                                                  avg_accept_rate=self.avg_accept_rate)
        # Update step size and accept rate
        self.step_sz = new_step_sz
        self.avg_accept_rate = new_accept_rate
        # Unpack the optimized parameters back into the state dict
        x_imp_n = unpackdict(x_imp, shapes)
        set_vars(self.impulse_syms, xn['glm']['imp'], x_imp_n)
        x['glms'][n] = xn['glm']
        return x
class HmcDirichletImpulseUpdate(ParallelMetropolisHastingsUpdate):
    """
    Update the Dirichlet impulse response parameters using Hamiltonian
    Monte Carlo. Stochastically follow the gradient of the parameters using
    Hamiltonian dynamics.

    One impulse variable g_n exists per presynaptic neuron n; update()
    resamples each of them for a given postsynaptic neuron, conditioned on
    the rest of the population state.
    """

    def __init__(self):
        super(HmcDirichletImpulseUpdate, self).__init__()
        # HMC tuning state: running-average acceptance rate and step size.
        # Both are adapted after every hmc() call in update().
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        """ Initialize functions that compute the gradient and Hessian of
        the log probability with respect to the differentiable GLM
        parameters, e.g. the weight matrix if it exists.
        """
        self.population = population
        self.glm = population.glm
        self.network = self.population.network
        self.syms = population.get_variables()

        # Compute gradients of the log prob wrt the GLM parameters
        self.glm_logp = self.glm.log_p
        # Build one symbolic gradient per presynaptic impulse variable g:
        #   grads_wrt_imp       - gradient of the full GLM log prob
        #   grad_lls_wrt_imp    - gradient of the likelihood term only
        #   grad_priors_wrt_imp - gradient of the impulse-model prior only
        # The likelihood/prior split lets _grad_glm_logp accumulate the
        # likelihood gradient over multiple data sequences separately.
        self.grads_wrt_imp = []
        self.grad_lls_wrt_imp = []
        self.grad_priors_wrt_imp = []
        for g in self.glm.imp_model.gs:
            grad,_ = grad_wrt_list(self.glm_logp, [g])
            self.grads_wrt_imp.append(grad)

            grad,_ = grad_wrt_list(self.glm.ll, [g])
            self.grad_lls_wrt_imp.append(grad)

            grad,_ = grad_wrt_list(self.glm.imp_model.log_p, [g])
            self.grad_priors_wrt_imp.append(grad)

        # Get the shape of the parameters from a sample of variables
        # self.glm_shapes = get_shapes(self.population.extract_vars(self.population.sample(),0)['glm']['imp'],
        #                              self.impulse_syms)

    def _precompute_vars(self, x, n):
        """ Precompute currents for sampling A and W
        """
        nvars = self.population.extract_vars(x, n)
        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)
        I_stim = seval(self.glm.bkgd_model.I_stim,
                       self.syms,
                       nvars)
        return I_bias, I_stim

    def _glm_logp(self, n, g, x_all, I_bias, I_stim):
        """
        Compute the log probability (or gradients and Hessians thereof)
        of the given GLM variables. We also need the rest of the population variables,
        i.e. those that are not being sampled currently, in order to evaluate the log
        probability.

        n is the presynaptic index whose impulse variable takes the candidate
        value g; all other impulse variables are read from x_all.
        """
        # Extract the glm parameters
        s = \
            {
                'I_stim' : self.glm.bkgd_model.I_stim,
                'I_bias' : self.glm.bias_model.I_bias,
                'n' : self.glm.n,
                'W' : self.network.weights.W_flat,
                'A' : self.network.graph.A
            }
        xv = \
            {
                'I_stim' : I_stim,
                'I_bias' : I_bias,
                'n' : x_all['glm']['n'],
                'W' : x_all['net']['weights']['W'],
                'A' : x_all['net']['graph']['A']
            }
        # Add the Dirichlet impulse response parameters, substituting the
        # candidate g for neuron n and the current state for all others.
        for n_pre, g_sym in enumerate(self.glm.imp_model.gs):
            s[g_sym.name] = g_sym
            if n_pre == n:
                xv[g_sym.name] = g
            else:
                xv[g_sym.name] = x_all['glm']['imp'][g_sym.name]

        # Prior term, then the likelihood accumulated over data sequences.
        lp = seval(self.glm.imp_model.log_p, s, xv)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm.ll, s, xv)

        # set_vars(self.impulse_syms, x_all['glm']['imp'], x_imp)
        # x_all['glm']['imp']['g_%d' % n] = g
        # lp = seval(self.glm_logp,
        #            self.syms,
        #            x_all)
        return lp

    def _grad_glm_logp(self, n, g, x_all, I_bias, I_stim):
        """
        Compute the negative log probability (or gradients and Hessians thereof)
        of the given GLM variables. We also need the rest of the population variables,
        i.e. those that are not being sampled currently, in order to evaluate the log
        probability.

        Mirrors _glm_logp but evaluates the precompiled gradient expressions
        for presynaptic neuron n.
        """
        # Extract the glm parameters
        # x_all['glm']['imp']['g_%d' % n] = g
        # Extract the glm parameters
        s = \
            {
                'I_stim' : self.glm.bkgd_model.I_stim,
                'I_bias' : self.glm.bias_model.I_bias,
                'n' : self.glm.n,
                'W' : self.network.weights.W_flat,
                'A' : self.network.graph.A
            }
        xv = \
            {
                'I_stim' : I_stim,
                'I_bias' : I_bias,
                'n' : x_all['glm']['n'],
                'W' : x_all['net']['weights']['W'],
                'A' : x_all['net']['graph']['A']
            }
        # Add the Dirichlet impulse response parameters
        for n_pre, g_sym in enumerate(self.glm.imp_model.gs):
            s[g_sym.name] = g_sym
            if n_pre == n:
                xv[g_sym.name] = g
            else:
                xv[g_sym.name] = x_all['glm']['imp'][g_sym.name]

        # Gradient of the prior, then of the likelihood per data sequence.
        glp = seval(self.grad_priors_wrt_imp[n], s, xv)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            glp += seval(self.grad_lls_wrt_imp[n], s, xv)

        # glp = seval(self.grads_wrt_imp[n],
        #             self.syms,
        #             x_all)
        return glp

    def update(self, x, n_post):
        """ Gibbs sample the GLM parameters. These are mostly differentiable
        so we use HMC wherever possible.

        Resamples every impulse variable g_{n_pre} for postsynaptic neuron
        n_post: via HMC when the edge A[n_pre, n_post] exists, from the
        (Gamma) prior otherwise. Mutates and returns x.
        """
        xn = self.population.extract_vars(x, n_post)
        I_bias, I_stim = self._precompute_vars(x, n_post)

        A = x['net']['graph']['A']
        for n_pre in range(self.population.N):
            # Only sample if there is a connection from n_pre to n_post
            if A[n_pre, n_post]:
                # Get current g
                g_0 = xn['glm']['imp']['g_%d' % n_pre]

                # Create lambda functions to compute the nll and its gradient
                nll = lambda g: -1.0*self._glm_logp(n_pre, g, xn, I_bias, I_stim)
                grad_nll = lambda g: -1.0*self._grad_glm_logp(n_pre, g, xn, I_bias, I_stim)

                # HMC with automatic parameter tuning
                n_steps = 2
                g_f, new_step_sz, new_accept_rate = hmc(nll,
                                                        grad_nll,
                                                        self.step_sz,
                                                        n_steps,
                                                        g_0,
                                                        adaptive_step_sz=True,
                                                        avg_accept_rate=self.avg_accept_rate)

                # Update step size and accept rate
                self.step_sz = new_step_sz
                self.avg_accept_rate = new_accept_rate
                # print "GLM step sz: %.3f\tGLM_accept rate: %.3f" % (new_step_sz, new_accept_rate)

                # Unpack the optimized parameters back into the state dict
                xn['glm']['imp']['g_%d' % n_pre] = g_f
            else:
                # No edge: Sample g from the prior
                # (np.random.gamma with shape alpha and a scale vector of
                # ones yields B independent draws — presumably one per
                # impulse basis function; TODO confirm against imp_model)
                g_f = np.random.gamma(self.glm.imp_model.alpha,
                                      np.ones(self.glm.imp_model.B))
                xn['glm']['imp']['g_%d' % n_pre] = g_f

        x['glms'][n_post] = xn['glm']
        return x
class CollapsedGibbsNetworkColumnUpdate(ParallelMetropolisHastingsUpdate):
def __init__(self):
super(CollapsedGibbsNetworkColumnUpdate, self).__init__()
# TODO: Only use an MH proposal from the prior if you are certain
# that the prior puts mass on likely edges. Otherwise you will never
# propose to transition from no-edge to edge and mixing will be very,
# very slow.
self.propose_from_prior = False
# Define constants for Sampling
self.DEG_GAUSS_HERMITE = 10
self.GAUSS_HERMITE_ABSCISSAE, self.GAUSS_HERMITE_WEIGHTS = \
np.polynomial.hermite.hermgauss(self.DEG_GAUSS_HERMITE)
def preprocess(self, population):
""" Initialize functions that compute the gradient and Hessian of
the log probability with respect to the differentiable network
parameters, e.g. the weight matrix if it exists.
"""
self.population = population
self.network = population.network
self.glm = population.glm
self.syms = population.get_variables()
# Get the weight model
self.mu_w = self.network.weights.prior.mu.get_value()
self.sigma_w = self.network.weights.prior.sigma.get_value()
if hasattr(self.network.weights, 'refractory_prior'):
self.mu_w_ref = self.network.weights.refractory_prior.mu.get_value()
self.sigma_w_ref = self.network.weights.refractory_prior.sigma.get_value()
else:
self.mu_w_ref = self.mu_w
self.sigma_w_ref = self.sigma_w
def _precompute_vars(self, x, n_post):
""" Precompute currents for sampling A and W
"""
nvars = self.population.extract_vars(x, n_post)
I_bias = seval(self.glm.bias_model.I_bias,
self.syms,
nvars)
I_stim = seval(self.glm.bkgd_model.I_stim,
self.syms,
nvars)
I_imp = seval(self.glm.imp_model.I_imp,
self.syms,
nvars)
p_A = seval(self.network.graph.pA,
self.syms['net'],
x['net'])
return I_bias, I_stim, I_imp, p_A
def _precompute_other_current(self, x, I_imp, n_pre, n_post):
"""
Precompute the weighted currents from neurons other than n_pre
"""
# Set A[n_pre,n_post]=0 to omit this current
A = x['net']['graph']['A']
W = x['net']['weights']['W']
A_init = A[n_pre, n_post]
A[n_pre, n_post] = 0
# Get the likelihood of the GLM under A and W
s = {'A' : self.network.graph.A,
'W' : self.syms['net']['weights']['W'],
'n' :self.glm.n,
'I_imp' : self.glm.imp_model.I_imp,
'nlin' : self.syms['glm']['nlin']
}
xv = {'A' : A,
'W' : W,
'n' : n_post,
'I_imp' : I_imp,
'nlin' : x['glms'][n_post]['nlin']
}
I_net_other = seval(self.glm.I_net, s, xv)
# Reset A
A[n_pre, n_post] = A_init
return I_net_other
def _glm_ll_A_old(self, n_pre, n_post, w, x, I_bias, I_stim, I_imp):
""" Compute the log likelihood of the GLM with A=True and given W
"""
# Set A in state dict x
A = x['net']['graph']['A']
A_init = A[n_pre, n_post]
A[n_pre, n_post] = 1
# Set W in state dict x
W = x['net']['weights']['W'].reshape(A.shape)
W_init = W[n_pre, n_post]
W[n_pre, n_post] = w
# Get the likelihood of the GLM under A and W
s = {'A' : self.network.graph.A,
'W' : self.syms['net']['weights']['W'],
'n' :self.glm.n,
'I_bias' : self.glm.bias_model.I_bias,
'I_stim' : self.glm.bkgd_model.I_stim,
'I_imp' : self.glm.imp_model.I_imp,
'nlin' : self.syms['glm']['nlin']
}
xv = {'A' : A,
'W' : W.ravel(),
'n' : n_post,
'I_bias' : I_bias,
'I_stim' : I_stim,
'I_imp' : I_imp,
'nlin' : x['glms'][n_post]['nlin']
}
# Compute the log likelihood for each data sequence
ll = 0
for data in self.population.data_sequences:
self.population.set_data(data)
ll += seval(self.glm.ll, s, xv)
# Reset A and W
A[n_pre, n_post] = A_init
W[n_pre, n_post] = W_init
return ll
def _glm_ll(self, n_pre, n_post, w, x, I_bias, I_stim, I_imp, I_net_other):
""" Compute the log likelihood of the GLM with A=True and given W
"""
# Compute the weighted network current
I_net = I_net_other + w*I_imp[:,n_pre]
# Get the likelihood of the GLM under A and W
s = {'n' :self.glm.n,
'I_bias' : self.glm.bias_model.I_bias,
'I_stim' : self.glm.bkgd_model.I_stim,
'I_net' : self.glm.I_net,
'nlin' : self.syms['glm']['nlin']
}
xv = {'n' : n_post,
'I_bias' : I_bias,
'I_stim' : I_stim,
'I_net' : I_net,
'nlin' : x['glms'][n_post]['nlin']
}
# Compute the log likelihood for each data sequence
ll = 0
for data in self.population.data_sequences:
self.population.set_data(data)
ll += seval(self.glm.ll, s, xv)
return ll
def _glm_ll_noA(self, n_pre, n_post, x, I_bias, I_stim, I_imp):
""" Compute the log likelihood of the GLM with A=True and given W
"""
# Set A in state dict x
A = x['net']['graph']['A']
A_init = A[n_pre, n_post]
A[n_pre, n_post] = 0
W = x['net']['weights']['W']
# Get the likelihood of the GLM under A and W
s = {'A' : self.network.graph.A,
'W' : self.syms['net']['weights']['W'],
'n' :self.glm.n,
'I_bias' : self.glm.bias_model.I_bias,
'I_stim' : self.glm.bkgd_model.I_stim,
'I_imp' : self.glm.imp_model.I_imp,
'nlin' : self.syms['glm']['nlin']
}
xv = {'A' : A,
'W' : W.ravel(),
'n' : n_post,
'I_bias' : I_bias,
'I_stim' : I_stim,
'I_imp' : I_imp,
'nlin' : x['glms'][n_post]['nlin']
}
# Compute the log likelihood for each data sequence
ll = 0
for data in self.population.data_sequences:
self.population.set_data(data)
ll += seval(self.glm.ll, s, xv)
A[n_pre, n_post] = A_init
return ll
def _collapsed_sample_AW(self, n_pre, n_post, x,
I_bias, I_stim, I_imp, I_other, p_A):
"""
Do collapsed Gibbs sampling for an entry A_{n,n'} and W_{n,n'} where
n = n_pre and n' = n_post.
"""
# Set sigma_w and mu_w
if n_pre == n_post:
mu_w = self.mu_w_ref
sigma_w = self.sigma_w_ref
else:
mu_w = self.mu_w
sigma_w = self.sigma_w
A = x['net']['graph']['A']
W = x['net']['weights']['W'].reshape(A.shape)
# Propose from the prior and see if A would change.
prior_lp_A = np.log(p_A[n_pre, n_post])
prior_lp_noA = np.log(1.0-p_A[n_pre, n_post])
# TODO: We could make this faster by precomputing the other currents
# going into neuron n'.
# Approximate G = \int_0^\infty p({s,c} | A, W) p(W_{n,n'}) dW_{n,n'}
log_L = np.zeros(self.DEG_GAUSS_HERMITE)
weighted_log_L = np.zeros(self.DEG_GAUSS_HERMITE)
W_nns = np.sqrt(2) * sigma_w * self.GAUSS_HERMITE_ABSCISSAE + mu_w
for i in np.arange(self.DEG_GAUSS_HERMITE):
w = self.GAUSS_HERMITE_WEIGHTS[i]
W_nn = W_nns[i]
log_L[i] = self._glm_ll(n_pre, n_post, W_nn,
x, I_bias, I_stim, I_imp, I_other)
# Handle NaNs in the GLM log likelihood
if np.isnan(log_L[i]):
log_L[i] = -np.Inf
weighted_log_L[i] = log_L[i] + np.log(w/np.sqrt(np.pi))
# Handle NaNs in the GLM log likelihood
if np.isnan(weighted_log_L[i]):
weighted_log_L[i] = -np.Inf
# compute log pr(A_nn) and log pr(\neg A_nn) via log G
log_G = logsumexp(weighted_log_L)
if not np.isfinite(log_G):
print weighted_log_L
raise Exception("log_G not finie")
# Compute log Pr(A_nn=1) given prior and estimate of log lkhd after integrating out W
log_pr_A = prior_lp_A + log_G
# Compute log Pr(A_nn = 0 | {s,c}) = log Pr({s,c} | A_nn = 0) + log Pr(A_nn = 0)
log_pr_noA = prior_lp_noA + \
self._glm_ll(n_pre, n_post, 0.0, x,
I_bias, I_stim, I_imp, I_other)
if np.isnan(log_pr_noA):
log_pr_noA = -np.Inf
# Sample A
try:
A[n_pre, n_post] = log_sum_exp_sample([log_pr_noA, log_pr_A])
if np.allclose(p_A[n_pre, n_post], 1.0) and not A[n_pre, n_post]:
print log_pr_noA
print log_pr_A
raise Exception("Sampled no self edge")
except Exception as e:
raise e
# import pdb; pdb.set_trace()
set_vars('A', x['net']['graph'], A)
# Sample W from its posterior, i.e. log_L with denominator log_G
# If A_nn = 0, we don't actually need to resample W since it has no effect
if A[n_pre,n_post] == 1:
# W[n_pre, n_post] = self._inverse_cdf_sample_w(mu_w, sigma_w, W_nns, log_L)
W[n_pre, n_post] = self._adaptive_rejection_sample_w(n_pre, n_post, x, mu_w, sigma_w,
W_nns, log_L, I_bias, I_stim, I_imp, I_other)
# if not np.isfinite(self._glm_ll(n_pre, n_post, W[n_pre, n_post], x, I_bias, I_stim, I_imp)):
# raise Exception("Invalid weight sample")
# print "p_W: %.3f (v=%.3f)" % (np.interp(W[n_pre, n_post], ws, p_W) ,v)
else:
# Sample W from the prior
W[n_pre, n_post] = mu_w + sigma_w * np.random.randn()
# Set W in state dict x
x['net']['weights']['W'] = W.ravel()
def _inverse_cdf_sample_w(self, mu_w, sigma_w, W_nns, log_L):
"""
Sample weight w using inverse CDF method. We have already evaluated the
log likelihood log_L at a set of points W_nns. Use these to approximate
the probability density.
"""
log_prior_W = -0.5/sigma_w**2 * (W_nns-mu_w)**2
log_posterior_W = log_prior_W + log_L
log_p_W = log_posterior_W - logsumexp(log_posterior_W)
p_W = np.exp(log_p_W)
F_W = cumtrapz(p_W, W_nns, initial=0.0)
F_W = F_W / F_W[-1]
# Sample W_rv
v = np.random.rand()
w = np.interp(v, F_W, W_nns)
return w
def _adaptive_rejection_sample_w(self, n_pre, n_post, x, mu_w, sigma_w, ws, log_L, I_bias, I_stim, I_imp, I_other):
"""
Sample weights using adaptive rejection sampling.
This only works for log-concave distributions, which will
be the case if the nonlinearity is convex and log concave, and
when the prior on w is log concave (as it is when w~Gaussian).
"""
# import pdb; pdb.set_trace()
log_prior_W = -0.5/sigma_w**2 * (ws-mu_w)**2
log_posterior_W = log_prior_W + log_L
# Define a function to evaluate the log posterior
# For numerical stability, try to normalize
Z = np.amax(log_posterior_W)
def _log_posterior(ws_in):
ws = np.asarray(ws_in)
shape = ws.shape
ws = np.atleast_1d(ws)
lp = np.zeros_like(ws)
for (i,w) in enumerate(ws):
lp[i] = -0.5/sigma_w**2 * (w-mu_w)**2 + \
self._glm_ll(n_pre, n_post, w, x, I_bias, I_stim, I_imp, I_other) \
- Z
if isinstance(ws_in, np.ndarray):
return lp.reshape(shape)
elif isinstance(ws_in, float) or isinstance(ws_in, np.float):
return np.float(lp)
# Only use the valid ws
# valid_ws = np.arange(len(ws))[np.isfinite(log_posterior_W)]
valid_ws = np.bitwise_and(np.isfinite(log_posterior_W),
log_posterior_W > -1e8,
log_posterior_W < 1e8)
return adaptive_rejection_sample(_log_posterior,
ws[valid_ws], log_posterior_W[valid_ws] - Z,
(-np.Inf, np.Inf),
stepsz=sigma_w/2.0,
debug=False)
def _collapsed_sample_AW_with_prior(self, n_pre, n_post, x,
I_bias, I_stim, I_imp, p_A):
"""
Do collapsed Gibbs sampling for an entry A_{n,n'} and W_{n,n'} where
n = n_pre and n' = n_post.
"""
# Set sigma_w and mu_w
if n_pre == n_post:
mu_w = self.mu_w_ref
sigma_w = self.sigma_w_ref
else:
mu_w = self.mu_w
sigma_w = self.sigma_w
A = x['net']['graph']['A']
W = x['net']['weights']['W'].reshape(A.shape)
# Propose from the prior and see if A would change.
prior_lp_A = np.log(p_A[n_pre, n_post])
prop_A = np.int8(np.log(np.random.rand()) < prior_lp_A)
# We only need to compute the acceptance probability if the proposal
# would change A
A_init = A[n_pre, n_post]
W_init = W[n_pre, n_post]
if A[n_pre, n_post] != prop_A:
# Approximate G = \int_0^\infty p({s,c} | A, W) p(W_{n,n'}) dW_{n,n'}
log_L = np.zeros(self.DEG_GAUSS_HERMITE)
W_nns = np.sqrt(2) * sigma_w * self.GAUSS_HERMITE_ABSCISSAE + mu_w
for i in np.arange(self.DEG_GAUSS_HERMITE):
w = self.GAUSS_HERMITE_WEIGHTS[i]
W_nn = W_nns[i]
log_L[i] = np.log(w/np.sqrt(np.pi)) + \
self._glm_ll_A(n_pre, n_post, W_nn,
x, I_bias, I_stim, I_imp)
# Handle NaNs in the GLM log likelihood
if np.isnan(log_L[i]):
log_L[i] = -np.Inf
# compute log pr(A_nn) and log pr(\neg A_nn) via log G
from scipy.misc import logsumexp
log_G = logsumexp(log_L)
# Compute log Pr(A_nn=1) given prior and estimate of log lkhd after integrating out W
log_lkhd_A = log_G
# Compute log Pr(A_nn = 0 | {s,c}) = log Pr({s,c} | A_nn = 0) + log Pr(A_nn = 0)
log_lkhd_noA = self._glm_ll_noA(n_pre, n_post, x, I_bias, I_stim, I_imp)
# Decide whether or not to accept
log_pr_accept = log_lkhd_A - log_lkhd_noA if prop_A else log_lkhd_noA - log_lkhd_A
if np.log(np.random.rand()) < log_pr_accept:
# Update A
A[n_pre, n_post] = prop_A
# Update W if there is an edge in A
if A[n_pre, n_post]:
# Update W if there is an edge
log_p_W = log_L - log_G
# Compute the log CDF
log_F_W = [logsumexp(log_p_W[:i]) for i in range(1,self.DEG_GAUSS_HERMITE)] + [0]
# Sample via inverse CDF
W[n_pre, n_post] = np.interp(np.log(np.random.rand()),
log_F_W,
W_nns)
elif A[n_pre, n_post]:
assert A[n_pre, n_post] == A_init
# If we propose not to change A then we accept with probability 1, but we
# still need to update W
# Approximate G = \int_0^\infty p({s,c} | A, W) p(W_{n,n'}) dW_{n,n'}
log_L = np.zeros(self.DEG_GAUSS_HERMITE)
W_nns = np.sqrt(2) * sigma_w * self.GAUSS_HERMITE_ABSCISSAE + mu_w
for i in np.arange(self.DEG_GAUSS_HERMITE):
w = self.GAUSS_HERMITE_WEIGHTS[i]
W_nn = W_nns[i]
log_L[i] = np.log(w/np.sqrt(np.pi)) + \
self._glm_ll_A(n_pre, n_post, W_nn,
x, I_bias, I_stim, I_imp)
# Handle NaNs in the GLM log likelihood
if np.isnan(log_L[i]):
log_L[i] = -np.Inf
# compute log pr(A_nn) and log pr(\neg A_nn) via log G
from scipy.misc import logsumexp
log_G = logsumexp(log_L)
# Update W if there is an edge
log_p_W = log_L - log_G
# Compute the log CDF
log_F_W = [logsumexp(log_p_W[:i]) for i in range(1,self.DEG_GAUSS_HERMITE)] + [0]
# Sample via inverse CDF
W[n_pre, n_post] = np.interp(np.log(np.random.rand()),
log_F_W,
W_nns)
# Set W in state dict x
x['net']['weights']['W'] = W.ravel()
def update(self, x, n):
""" Collapsed Gibbs sample a column of A and W
"""
A = x['net']['graph']['A']
N = A.shape[0]
I_bias, I_stim, I_imp, p_A = self._precompute_vars(x, n)
order = np.arange(N)
np.random.shuffle(order)
for n_pre in order:
# Precompute the other currents
I_other = self._precompute_other_current(x, I_imp, n_pre, n)
# print "Sampling %d->%d" % (n_pre, n)
if self.propose_from_prior:
self._collapsed_sample_AW_with_prior(n_pre, n, x,
I_bias, I_stim, I_imp, p_A)
else:
self._collapsed_sample_AW(n_pre, n, x,
I_bias, I_stim, I_imp, I_other, p_A)
return x
class GibbsNetworkColumnUpdate(ParallelMetropolisHastingsUpdate):
    """
    Non-collapsed sampler for one column of the network: A[:,n] is Gibbs
    sampled entry by entry, then W is updated with HMC.
    """

    def __init__(self):
        super(GibbsNetworkColumnUpdate, self).__init__()
        # HMC tuning state for the W update, adapted on every call.
        self.avg_accept_rate = 0.9
        self.step_sz = 0.05

    def preprocess(self, population):
        """ Initialize functions that compute the gradient and Hessian of
        the log probability with respect to the differentiable network
        parameters, e.g. the weight matrix if it exists.
        """
        self.N = population.model['N']
        self.population = population
        self.network = population.network
        self.glm = population.glm
        self.syms = population.get_variables()

        # Symbolic gradients wrt the flattened weight matrix:
        # network prior term and GLM likelihood term, kept separate so
        # the likelihood can be accumulated over data sequences.
        self.g_netlp_wrt_W = T.grad(self.network.log_p, self.syms['net']['weights']['W'])
        self.g_glmll_wrt_W = T.grad(self.glm.ll, self.syms['net']['weights']['W'])

    def _precompute_currents(self, x, n_post):
        """ Precompute currents for sampling A and W
        """
        nvars = self.population.extract_vars(x, n_post)

        I_bias = seval(self.glm.bias_model.I_bias,
                       self.syms,
                       nvars)
        I_stim = seval(self.glm.bkgd_model.I_stim,
                       self.syms,
                       nvars)
        I_imp = seval(self.glm.imp_model.I_imp,
                      self.syms,
                      nvars)

        return I_bias, I_stim, I_imp

    def _lp_A(self, A, x, n_post, I_bias, I_stim, I_imp):
        """ Compute the log probability for a given column A[:,n_post]

        Prior term from the network model plus the GLM likelihood summed
        over all data sequences. Mutates x['net']['graph'] via set_vars.
        """
        # Set A in state dict x
        set_vars('A', x['net']['graph'], A)

        # Get the prior probability of A
        lp = seval(self.network.log_p,
                   self.syms['net'],
                   x['net'])

        # Get the likelihood of the GLM under A
        s = [self.network.graph.A] + \
            _flatten(self.syms['net']['weights']) + \
            [self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = [A] + \
             _flatten(x['net']['weights']) + \
             [n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += self.glm.ll.eval(dict(zip(s, xv)))

        return lp

    # Helper functions to sample W
    def _lp_W(self, W, x, n_post, I_bias, I_stim, I_imp):
        """ Compute the log probability for a given column W[:,n_post]

        Same structure as _lp_A but with W (flat) substituted.
        """
        # Set A in state dict x
        set_vars('W', x['net']['weights'], W)

        # Get the prior probability of A
        lp = seval(self.network.log_p,
                   self.syms['net'],
                   x['net'])

        # Get the likelihood of the GLM under W
        s = _flatten(self.syms['net']['graph']) + \
            [self.network.weights.W_flat,
             self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = _flatten(x['net']['graph']) + \
             [W,
              n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += self.glm.ll.eval(dict(zip(s, xv)))

        return lp

    def _grad_lp_W(self, W, x, n_post, I_bias, I_stim, I_imp):
        """ Compute the gradient of the log probability wrt the flat W for a
        given column W[:,n_post]; gradients wrt other columns are masked out.
        """
        # Set A in state dict x
        set_vars('W', x['net']['weights'], W)

        # Get the prior probability of A
        g_lp = seval(self.g_netlp_wrt_W,
                     self.syms['net'],
                     x['net'])

        # Get the likelihood of the GLM under W
        s = _flatten(self.syms['net']['graph']) + \
            [self.network.weights.W_flat,
             self.glm.n,
             self.glm.bias_model.I_bias,
             self.glm.bkgd_model.I_stim,
             self.glm.imp_model.I_imp] + \
            _flatten(self.syms['glm']['nlin'])

        xv = _flatten(x['net']['graph']) + \
             [W,
              n_post,
              I_bias,
              I_stim,
              I_imp] + \
             _flatten(x['glms'][n_post]['nlin'])

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            g_lp += seval(self.g_glmll_wrt_W,
                          dict(zip(range(len(s)), s)),
                          dict(zip(range(len(xv)),xv)))

        # Ignore gradients wrt columns other than n_post
        g_mask = np.zeros((self.N,self.N))
        g_mask[:,n_post] = 1
        g_lp *= g_mask.flatten()
        return g_lp

    def _sample_column_of_A(self, n_post, x, I_bias, I_stim, I_imp):
        """Gibbs sample each entry of A[:,n_post] from its two-point conditional."""
        # Sample the adjacency matrix if it exists
        if 'A' in x['net']['graph']:
            # print "Sampling A"
            A = x['net']['graph']['A']
            N = A.shape[0]

            # Sample coupling filters from other neurons
            for n_pre in np.arange(N):
                # print "Sampling A[%d,%d]" % (n_pre,n_post)
                # WARNING Setting A is somewhat of a hack. It only works
                # because nvars copies x's pointer to A rather than making
                # a deep copy of the adjacency matrix.
                A[n_pre,n_post] = 0
                log_pr_noA = self._lp_A(A, x, n_post, I_bias, I_stim, I_imp)

                A[n_pre,n_post] = 1
                log_pr_A = self._lp_A(A, x, n_post, I_bias, I_stim, I_imp)

                # Sample A[n_pre,n_post]
                A[n_pre,n_post] = log_sum_exp_sample([log_pr_noA, log_pr_A])

                if not np.isfinite(log_pr_noA) or not np.isfinite(log_pr_A):
                    import pdb; pdb.set_trace()

                if n_pre == n_post and not A[n_pre, n_post]:
                    import pdb; pdb.set_trace()

    def _sample_column_of_W(self, n_post, x, I_bias, I_stim, I_imp):
        """HMC update of the (flat) weight matrix given the current A."""
        # Sample W if it exists
        if 'W' in x['net']['weights']:
            # print "Sampling W"
            nll = lambda W: -1.0 * self._lp_W(W, x, n_post, I_bias, I_stim, I_imp)
            grad_nll = lambda W: -1.0 * self._grad_lp_W(W, x, n_post, I_bias, I_stim, I_imp)

            # Automatically tune these parameters
            n_steps = 10
            (W, new_step_sz, new_accept_rate) = hmc(nll,
                                                    grad_nll,
                                                    self.step_sz,
                                                    n_steps,
                                                    x['net']['weights']['W'],
                                                    adaptive_step_sz=True,
                                                    avg_accept_rate=self.avg_accept_rate)

            # Update step size and accept rate
            self.step_sz = new_step_sz
            self.avg_accept_rate = new_accept_rate
            # print "W step sz: %.3f\tW_accept rate: %.3f" % (new_step_sz, new_accept_rate)

            # Update current W
            x['net']['weights']['W'] = W

    def update(self, x, n):
        """ Sample a single column of the network (all the incoming
        coupling filters). This is a parallelizable chunk.
        """
        # Precompute the filtered currents from other GLMs
        I_bias, I_stim, I_imp = self._precompute_currents(x, n)
        self._sample_column_of_A(n, x, I_bias, I_stim, I_imp)
        self._sample_column_of_W(n, x, I_bias, I_stim, I_imp)
        return x
class LatentLocationUpdate(MetropolisHastingsUpdate):
    """
    Gibbs sample the parameters of a latent distance model, namely the
    latent locations (if they are not given) and the distance scale.

    This is a dispatcher: preprocess() builds one specialized updater per
    LatentLocation component (discrete or continuous), and update() applies
    them in sequence.
    """

    def __init__(self):
        super(LatentLocationUpdate, self).__init__()
        # HMC tuning state (used by continuous-location sub-updates).
        self.avg_accept_rate = 0.9
        self.step_sz = 0.001

    def preprocess(self, population):
        """Collect the population's LatentLocation components and build the
        appropriate sub-update for each one."""
        self.N = population.model['N']

        # Get the location model(s)
        from pyglm.components.latent import LatentLocation
        self.location_models = []
        self.location_updates = []
        for component in population.latent.latentlist:
            if not isinstance(component, LatentLocation):
                continue
            self.location_models.append(component)

            # Integer-valued locations get a local discrete Gibbs update;
            # everything else is treated as continuous and sampled with HMC.
            if component.dtype == np.int:
                # update = _DiscreteLatentLocationUpdate(component)
                # update = _DiscreteGibbsLatentLocationUpdate(component)
                sub_update = _DiscreteLocalGibbsLatentLocationUpdate(component)
            else:
                sub_update = _ContinuousLatentLocationUpdate(component)
            sub_update.preprocess(population)
            self.location_updates.append(sub_update)

    def update(self, x):
        """
        Update each location update in turn
        """
        for loc_update in self.location_updates:
            x = loc_update.update(x)
        return x
class _ContinuousLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample continuous latent locations

    Uses HMC on the flattened location matrix L, with a log probability that
    sums the location prior and the log prob of every model that consumes L
    (latent-distance graph, shared tuning-curve background).
    """

    def __init__(self, latent_location_component):
        # The LatentLocation component whose L this update samples.
        self.location = latent_location_component

    def preprocess(self, population):
        self.syms = population.get_variables()

        # Get the shape of L
        # TODO: Fix this hack!
        self.L = self.location.L
        self.L_shape = population.sample()['latent'][self.location.name]['L'].shape

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = T.constant(0.)
        self.log_p += self.location.log_p

        self.g_log_p = T.constant(0.)
        self.g_log_p += T.grad(self.location.log_p, self.L)

        from pyglm.components.graph import LatentDistanceGraphModel
        if isinstance(population.network.graph, LatentDistanceGraphModel):
            self.log_p += population.network.graph.log_p
            self.g_log_p += T.grad(population.network.graph.log_p, self.L)

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            # NOTE(review): the isinstance check is on bkgd_model but the
            # term added is glm.bkgd.log_p — presumably an alias; confirm.
            self.log_p += population.glm.bkgd.log_p
            self.g_log_p += T.grad(population.glm.bkgd.log_p, self.L)

    def _lp_L(self, L, x):
        """Evaluate the combined log prob with L substituted into state x."""
        # Set L in state dict x
        set_vars('L', x['latent'][self.location.name], L)
        lp = seval(self.log_p, self.syms, x)
        assert np.all(np.isfinite(lp))

        return lp

    def _grad_lp_wrt_L(self, L, x):
        """Evaluate the gradient of the combined log prob wrt L."""
        # Set L in state dict x
        set_vars('L', x['latent'][self.location.name], L)
        g_lp = seval(self.g_log_p, self.syms, x)

        # if not np.all(np.isfinite(g_lp)):
        #     import pdb; pdb.set_trace()
        return g_lp

    def update(self, x):
        """
        Sample L using HMC given A and delta (distance scale)
        """
        # HMC works on the raveled L; reshape back for the model evals.
        nll = lambda L: -1.0 * self._lp_L(L.reshape(self.L_shape), x)
        grad_nll = lambda L: -1.0 * self._grad_lp_wrt_L(L.reshape(self.L_shape), x).ravel()

        # Automatically tune these paramseters
        n_steps = 10
        (L, new_step_sz, new_accept_rate) = hmc(nll,
                                                grad_nll,
                                                self.step_sz,
                                                n_steps,
                                                x['latent'][self.location.name]['L'].ravel(),
                                                adaptive_step_sz=True,
                                                avg_accept_rate=self.avg_accept_rate)

        # Update step size and accept rate
        self.step_sz = new_step_sz
        # print "Step: ", self.step_sz
        self.avg_accept_rate = new_accept_rate
        # print "Accept: ", self.avg_accept_rate

        # Update current L
        x['latent'][self.location.name]['L'] = L.reshape(self.L_shape)

        return x
class _DiscreteLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid

    Uses a symmetric random-walk Metropolis-Hastings proposal of +/- 1 step
    per coordinate, one neuron row of L at a time.
    """

    def __init__(self, latent_location_component):
        # The LatentLocation component whose L this update samples.
        self.location = latent_location_component

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()
        self.L = self.location.Lmatrix

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = self.location.log_p
        self.log_lkhd = T.constant(0.)

        from pyglm.components.graph import LatentDistanceGraphModel
        if isinstance(population.network.graph, LatentDistanceGraphModel):
            self.log_lkhd += population.network.graph.log_p

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            self.log_lkhd += population.glm.log_p

    def _lp_L(self, L, x, n):
        """Prior + likelihood of L (flattened) for neuron n's variable set;
        -inf when L is outside the prior's support."""
        if not self._check_bounds(L):
            return -np.Inf

        # Set L in state dict x
        xn = self.population.extract_vars(x, n)
        set_vars('L', xn['latent'][self.location.name], L.ravel())
        lp = seval(self.log_p, self.syms, xn)
        lp += seval(self.log_lkhd, self.syms, xn)

        return lp

    def _check_bounds(self, L):
        """
        Return true if locations are within the allowable range
        """
        from pyglm.components.priors import Categorical, JointCategorical
        prior = self.location.location_prior
        if isinstance(prior, Categorical):
            if np.any(L < prior.min) or np.any(L > prior.max):
                return False
        if isinstance(prior, JointCategorical):
            if np.any(L[:,0] < prior.min0) or np.any(L[:,0] > prior.max0) or \
               np.any(L[:,1] < prior.min1) or np.any(L[:,1] > prior.max1):
                return False
        return True

    def update(self, x):
        """
        Sample each entry in L using Metropolis Hastings
        """
        L = seval(self.location.Lmatrix, self.syms['latent'], x['latent'])
        # print "L: ", L
        for n in range(self.N):
            # Work on a copy of row n so the rejected case can restore it;
            # _lp_L reads the shared L array in place.
            L_curr = L[n,:].copy()
            lp_curr = self._lp_L(L, x, n)

            # Make a symmetric proposal of \pm 1 step along each dimension independently
            L_prop = L_curr + np.random.randint(-1,2,L_curr.shape)
            L[n,:] = L_prop
            lp_prop = self._lp_L(L, x, n)

            # Accept or reject (ignoring proposal since it's symmetric)
            if np.log(np.random.rand()) < lp_prop - lp_curr:
                L[n,:] = L_prop
                # print "%d: [%d,%d]->[%d,%d]" % (n, L_curr[0], L_curr[1], L_prop[0],L_prop[1])
            else:
                L[n,:] = L_curr
                # print "%d: [%d,%d]->[%d,%d]" % (n, L_curr[0], L_curr[1], L_curr[0],L_curr[1])

        # Update current L
        if not self._check_bounds(L):
            import pdb; pdb.set_trace()

        x['latent'][self.location.name]['L'] = L.ravel()
        return x
class _DiscreteGibbsLatentLocationUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid

    Exact Gibbs variant: for each neuron, enumerate every grid cell allowed
    by the (Joint)Categorical prior, evaluate the conditional log prob, and
    sample a new location from the normalized distribution.
    """

    def __init__(self, latent_location_component):
        # The LatentLocation component whose L this update samples.
        self.location = latent_location_component

    def preprocess(self, population):
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()
        self.L = self.location.Lmatrix

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = self.location.log_p
        self.log_lkhd = T.constant(0.)

        from pyglm.components.graph import LatentDistanceGraphModel
        if isinstance(population.network.graph, LatentDistanceGraphModel):
            self.log_lkhd += population.network.graph.log_p

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            self.log_lkhd += population.glm.log_p

    def _lp_L(self, L, x, n):
        """Prior + likelihood of L (flattened) for neuron n's variable set;
        -inf when L is outside the prior's support."""
        if not self._check_bounds(L):
            return -np.Inf

        # Set L in state dict x
        xn = self.population.extract_vars(x, n)
        set_vars('L', xn['latent'][self.location.name], L.ravel())
        lp = seval(self.log_p, self.syms, xn)
        lp += seval(self.log_lkhd, self.syms, xn)

        return lp

    def _check_bounds(self, L):
        """
        Return true if locations are within the allowable range
        """
        from pyglm.components.priors import Categorical, JointCategorical
        prior = self.location.location_prior
        if isinstance(prior, Categorical):
            if np.any(L < prior.min) or np.any(L > prior.max):
                return False
        if isinstance(prior, JointCategorical):
            if np.any(L[:,0] < prior.min0) or np.any(L[:,0] > prior.max0) or \
               np.any(L[:,1] < prior.min1) or np.any(L[:,1] > prior.max1):
                return False
        return True

    def update(self, x):
        """
        Sample each entry in L using Metropolis Hastings

        (Despite the docstring carried over from the MH variant, this is an
        exact Gibbs scan over the discrete grid.)
        """
        from pyglm.components.priors import Categorical, JointCategorical
        prior = self.location.location_prior
        L = seval(self.location.Lmatrix, self.syms['latent'], x['latent'])
        # print "L: ", L
        for n in range(self.N):
            # Compute the probability of each possible location
            if isinstance(prior, Categorical):
                # 1D grid: enumerate prior.min..prior.max.
                lnp = np.zeros(prior.max-prior.min + 1)
                for i,l in enumerate(range(prior.min, prior.max+1)):
                    L[n,0] = l
                    lnp[i] = self._lp_L(L, x, n)
                # Assigns the whole row n — assumes 1-column L here; confirm.
                L[n] = prior.min + log_sum_exp_sample(lnp)

            elif isinstance(prior, JointCategorical):
                # 2D grid: enumerate the product of both coordinate ranges.
                d1 = prior.max0-prior.min0+1
                d2 = prior.max1-prior.min1+1
                lnp = np.zeros((d1,d2))
                for i,l1 in enumerate(range(prior.min0, prior.max0+1)):
                    for j,l2 in enumerate(range(prior.min1, prior.max1+1)):
                        L[n,0] = l1
                        L[n,1] = l2
                        lnp[i,j] = self._lp_L(L, x, n)

                # import pdb; pdb.set_trace()
                # Gibbs sample from the 2d distribution
                ij = log_sum_exp_sample(lnp.ravel(order='C'))
                i,j = np.unravel_index(ij, (d1,d2), order='C')
                L[n,0] = prior.min0 + i
                L[n,1] = prior.min1 + j
            else:
                raise Exception('Only supporting Categorical and JointCategorical location priors')

        # Update current L
        if not self._check_bounds(L):
            import pdb; pdb.set_trace()

        x['latent'][self.location.name]['L'] = L.ravel()
        return x
class _DiscreteLocalGibbsLatentLocationUpdate(MetropolisHastingsUpdate):
"""
A special subclass to sample discrete latent locations on a grid
This is a Metropolis-Hastings update that takes local steps proportional
to their relative probability.
"""
def __init__(self, latent_location_component):
self.location = latent_location_component
def preprocess(self, population):
self.N = population.N
self.population = population
self.glm = self.population.glm
self.syms = population.get_variables()
self.L = self.location.Lmatrix
self.Lflat = self.location.Lflat
# Compute the log probability and its gradients, taking into
# account the prior and the likelihood of any consumers of the
# location.
self.log_p = self.location.log_p
self.log_lkhd = T.constant(0.)
from pyglm.components.graph import LatentDistanceGraphModel
if isinstance(population.network.graph, LatentDistanceGraphModel):
self.log_lkhd += population.network.graph.log_p
from pyglm.components.bkgd import SharedTuningCurveStimulus
if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
self.log_lkhd += population.glm.ll
def _precompute_vars(self, x, n):
""" Precompute currents for sampling A and W
"""
nvars = self.population.extract_vars(x, n)
I_bias = seval(self.glm.bias_model.I_bias,
self.syms,
nvars)
I_stim_xt = seval(self.glm.bkgd_model.I_stim_xt,
self.syms,
nvars)
I_net = seval(self.glm.I_net,
self.syms,
nvars)
return I_bias, I_stim_xt, I_net
def _lp_L(self, L, x, n, I_bias, I_stim_xt, I_net):
if not self._check_bounds(L):
return -np.Inf
# Extract the glm parameters
s = \
{
'L' : self.Lflat,
'I_stim_xt' : self.glm.bkgd_model.I_stim_xt,
'I_bias' : self.glm.bias_model.I_bias,
'I_net' : self.glm.I_net,
'A' : self.population.network.graph.A,
'n' : self.glm.n
}
xv = \
{
'L' : L.ravel(),
'I_stim_xt' : I_stim_xt,
'I_bias' : I_bias,
'I_net' : I_net,
'A' : x['net']['graph']['A'],
'n' : n
}
lp = seval(self.log_p, s, xv)
lp += seval(self.log_lkhd, s, xv)
# # Set L in state dict x
# xn = self.population.extract_vars(x, n)
# set_vars('L', xn['latent'][self.location.name], L.ravel())
# lp = seval(self.log_p, self.syms, xn)
# lp += seval(self.log_lkhd, self.syms, xn)
return lp
def _check_bounds(self, L):
"""
Return true if locations are within the allowable range
"""
from pyglm.components.priors import Categorical, JointCategorical
prior = self.location.location_prior
if isinstance(prior, Categorical):
if np.any(L < prior.min) or np.any(L > prior.max):
return False
if isinstance(prior, JointCategorical):
if np.any(L[:,0] < prior.min0) or np.any(L[:,0] > prior.max0) or \
np.any(L[:,1] < prior.min1) or np.any(L[:,1] > prior.max1):
return False
return True
def _get_neighbors(self, L):
"""
Get valid neighbors of 2D location (l0,l1)
"""
ne = []
from pyglm.components.priors import Categorical, JointCategorical
prior = self.location.location_prior
if isinstance(prior, Categorical):
for ne0 in range(L[0]-1,L[0]+2):
if ne0 >= prior.min and ne0 <= prior.max1:
ne.append((ne0))
elif isinstance(prior, JointCategorical):
for ne0 in range(L[0]-1,L[0]+2):
for ne1 in range(L[1]-1,L[1]+2):
if ne0 >= prior.min0 and ne0 <= prior.max0:
if ne1 >= prior.min1 and ne1 <= prior.max1:
ne.append((ne0,ne1))
return ne
def update(self, x):
"""
Sample each entry in L using Metropolis Hastings
"""
prior = self.location.location_prior
L = seval(self.location.Lmatrix, self.syms['latent'], x['latent'])
# Update each of the N neuron locations serially
# import pdb; pdb.set_trace()
for n in range(self.N):
print "Sampling location of neuron ", n
# Precompute currents
I_bias, I_stim_xt, I_net = self._precompute_vars(x, n)
# Compute the probability of each neighboring location
lnp_cache = {}
curr_loc = L[n,:]
curr_neighbors = self._get_neighbors(L[n,:])
curr_lnps = []
for ne in curr_neighbors:
L[n,:] = np.array(ne)
lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
lnp_cache[ne] = lnp_ne
curr_lnps.append(lnp_ne)
# Propose a neighbor according to its relative probability
prop_loc = curr_neighbors[log_sum_exp_sample(curr_lnps)]
# Compute acceptance probability
prop_neighbors = self._get_neighbors(prop_loc)
prop_lnps = []
for ne in prop_neighbors:
if ne in lnp_cache:
prop_lnps.append(lnp_cache[ne])
else:
L[n,:] = np.array(ne)
lnp_ne = self._lp_L(L, x, n, I_bias, I_stim_xt, I_net)
lnp_cache[ne] = lnp_ne
prop_lnps.append(lnp_ne)
# Acceptance probability is the ratio of normalizing constants
lnp_accept = logsumexp(curr_lnps) - logsumexp(prop_lnps)
if np.log(np.random.rand()) < lnp_accept:
L[n,:] = np.array(prop_loc)
else:
# Reject and stay in current loc
L[n,:] = np.array(curr_loc)
# Update current L
if not self._check_bounds(L):
import pdb; pdb.set_trace()
x['latent'][self.location.name]['L'] = L.ravel()
return x
class LatentTypeUpdate(MetropolisHastingsUpdate):
    """
    Gibbs update for the discrete latent type Y of each neuron.

    For every neuron the log posterior of each of the R possible types is
    evaluated and a new type is drawn from the normalized distribution;
    the class-probability vector alpha is then resampled from its
    conjugate Dirichlet posterior.
    """
    def __init__(self):
        pass

    def preprocess(self, population):
        """Collect the LatentType components and the likelihood terms
        that depend on the type assignments (stochastic-block graph
        prior and shared tuning curve stimulus, when present)."""
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()

        # Get the shared tuning curve component
        from pyglm.components.latent import LatentType
        self.latent_types = []
        for latent_component in population.latent.latentlist:
            if isinstance(latent_component, LatentType):
                self.latent_types.append(latent_component)

        from pyglm.components.graph import StochasticBlockGraphModel
        if isinstance(population.network.graph, StochasticBlockGraphModel):
            self.net_log_lkhd = population.network.graph.log_p
        else:
            self.net_log_lkhd = T.constant(0.)

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            self.glm_log_lkhd = population.glm.ll
        else:
            self.glm_log_lkhd = T.constant(0.)

    def _lp_L(self, latent_type, Y, x, n):
        """Log posterior of the type vector Y for neuron n: type prior +
        network likelihood + GLM likelihood summed over data sequences."""
        # Set Y in state dict x
        xn = self.population.extract_vars(x, n)
        set_vars('Y', xn['latent'][latent_type.name], Y.ravel())
        lp = seval(latent_type.log_p, self.syms, xn)
        lp += seval(self.net_log_lkhd, self.syms, xn)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm_log_lkhd, self.syms, xn)

        return lp

    def update(self, x):
        """
        Gibbs sample each neuron's type from its full conditional, then
        resample alpha from its conjugate Dirichlet posterior.
        """
        from pyglm.inference.log_sum_exp import log_sum_exp_sample
        for latent_type in self.latent_types:
            # Update the latent types
            R = latent_type.R
            Y = x['latent'][latent_type.name]['Y']
            print "Y: ", Y
            for n in range(self.N):
                print "Sampling latent type of neuron ", n
                lpr = np.zeros(R)
                for r in range(R):
                    Y[n] = r
                    lpr[r] = self._lp_L(latent_type, Y, x, n)
                Y[n] = log_sum_exp_sample(lpr)
            x['latent'][latent_type.name]['Y'] = Y

            # Update alpha with the conjugate dirichlet prior
            from pyglm.components.priors import Dirichlet
            if isinstance(latent_type.alpha_prior, Dirichlet):
                # Conjugate update: prior pseudo-counts + type counts.
                suffstats = latent_type.alpha_prior.alpha0.get_value()
                suffstats += np.bincount(Y, minlength=R)
                alpha = np.random.dirichlet(suffstats)
                x['latent'][latent_type.name]['alpha'] = alpha
            else:
                # NOTE(review): Warning is an Exception subclass, so this
                # aborts the whole update for non-Dirichlet priors;
                # warnings.warn was presumably intended -- confirm.
                raise Warning('Cannot update alpha prior!')
        return x
class LatentLocationAndTypeUpdate(MetropolisHastingsUpdate):
    """
    A special subclass to sample discrete latent locations on a grid
    along with the type of the neuron.

    NOTE(review): explicitly unfinished -- the constructor raises
    NotImplementedError, so none of the methods below are currently
    reachable. The second half of update() appears copied from the
    location updater and references attributes that are never set on
    this class; see inline notes.
    """
    def __init__(self):
        raise NotImplementedError('Joint update of location and type has not yet been implemented!')

    def preprocess(self, population):
        """Collect LatentType components and the type-dependent
        likelihood terms (stochastic-block graph prior and shared tuning
        curve stimulus, when present)."""
        self.N = population.N
        self.population = population
        self.syms = population.get_variables()

        # Get the shared tuning curve component
        from pyglm.components.latent import LatentType
        self.latent_types = []
        for latent_component in population.latent.latentlist:
            if isinstance(latent_component, LatentType):
                self.latent_types.append(latent_component)

        from pyglm.components.graph import StochasticBlockGraphModel
        if isinstance(population.network.graph, StochasticBlockGraphModel):
            self.net_log_lkhd = population.network.graph.log_p
        else:
            self.net_log_lkhd = T.constant(0.)

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            self.glm_log_lkhd = population.glm.ll
        else:
            self.glm_log_lkhd = T.constant(0.)

    def _lp_L(self, latent_type, Y, x, n):
        """Log posterior of the type vector Y for neuron n."""
        # Set Y in state dict x
        xn = self.population.extract_vars(x, n)
        set_vars('Y', xn['latent'][latent_type.name], Y.ravel())
        lp = seval(latent_type.log_p, self.syms, xn)
        # NOTE(review): latent_type has no net_log_lkhd attribute; this
        # presumably should be self.net_log_lkhd (set in preprocess) --
        # confirm before enabling this update.
        lp += seval(latent_type.net_log_lkhd, self.syms, xn)

        # Compute the log likelihood for each data sequence
        for data in self.population.data_sequences:
            self.population.set_data(data)
            lp += seval(self.glm_log_lkhd, self.syms, xn)

        return lp

    def update(self, x):
        """
        Sample each entry in L using Metropolis Hastings
        """
        from pyglm.inference.log_sum_exp import log_sum_exp_sample
        for latent_type in self.latent_types:
            # Update the latent types
            R = latent_type.R
            Y = x['latent'][latent_type.name]['Y']
            print "Y: ", Y
            for n in range(self.N):
                lpr = np.zeros(R)
                for r in range(R):
                    Y[n] = r
                    lpr[r] = self._lp_L(latent_type, Y, x, n)
                Y[n] = log_sum_exp_sample(lpr)
            x['latent'][latent_type.name]['Y'] = Y

            # Update alpha with the conjugate dirichlet prior
            from pyglm.components.priors import Dirichlet
            if isinstance(latent_type.alpha_prior, Dirichlet):
                suffstats = latent_type.alpha_prior.alpha0.get_value()
                suffstats += np.bincount(Y, minlength=R)
                alpha = np.random.dirichlet(suffstats)
                x['latent'][latent_type.name]['alpha'] = alpha
            else:
                # NOTE(review): Warning is an Exception subclass; this
                # aborts the update rather than merely warning.
                raise Warning('Cannot update alpha prior!')

        # NOTE(review): the remainder was copied from the location
        # updater; self.location, self._check_bounds, and a 3-argument
        # self._lp_L are not defined on this class, so this code would
        # fail if it were ever reached.
        from pyglm.components.priors import Categorical, JointCategorical
        prior = self.location.location_prior
        L = seval(self.location.Lmatrix, self.syms['latent'], x['latent'])
        for n in range(self.N):
            # Compute the probability of each possible location
            if isinstance(prior, Categorical):
                lnp = np.zeros(prior.max-prior.min + 1)
                for i,l in enumerate(range(prior.min, prior.max+1)):
                    L[n,0] = l
                    lnp[i] = self._lp_L(L, x, n)
                L[n] = prior.min + log_sum_exp_sample(lnp)
            elif isinstance(prior, JointCategorical):
                d1 = prior.max0-prior.min0+1
                d2 = prior.max1-prior.min1+1
                lnp = np.zeros((d1,d2))
                for i,l1 in enumerate(range(prior.min0, prior.max0+1)):
                    for j,l2 in enumerate(range(prior.min1, prior.max1+1)):
                        L[n,0] = l1
                        L[n,1] = l2
                        lnp[i,j] = self._lp_L(L, x, n)

                # Gibbs sample from the 2d distribution
                ij = log_sum_exp_sample(lnp.ravel(order='C'))
                i,j = np.unravel_index(ij, (d1,d2), order='C')
                L[n,0] = prior.min0 + i
                L[n,1] = prior.min1 + j
            else:
                raise Exception('Only supporting Categorical and JointCategorical location priors')

        # Update current L
        if not self._check_bounds(L):
            import pdb; pdb.set_trace()
        x['latent'][self.location.name]['L'] = L.ravel()
        return x
class SharedTuningCurveUpdate(MetropolisHastingsUpdate):
    """
    HMC update for the shared tuning curve weights of a
    LatentTypeWithTuningCurve component. The spatial weights w_x and
    temporal weights w_t are updated with separate HMC moves, each using
    gradients of the tuning-curve prior plus the GLM likelihoods of
    every neuron.
    """
    def __init__(self):
        # HMC hyperparameters: number of leapfrog steps, target average
        # acceptance rate, and the (adaptively tuned) step size.
        self.n_steps = 2
        self.avg_accept_rate = 0.9
        self.step_sz = 0.1

    def preprocess(self, population):
        """Find the tuning curve component (if any) and build symbolic
        expressions for the log posterior and its gradients with respect
        to w_x and w_t."""
        self.population = population
        self.glm = self.population.glm
        self.N = population.N

        # Get the shared tuning curve component
        from pyglm.components.latent import LatentTypeWithTuningCurve
        self.tc_model = None
        for latent_component in population.latent.latentlist:
            if isinstance(latent_component, LatentTypeWithTuningCurve):
                self.tc_model = latent_component
                break

        # Nothing to do if the population has no shared tuning curves
        if self.tc_model is None:
            return

        self.syms = population.get_variables()

        # Get the shape of w_x and w_t
        self.w_x = self.tc_model.w_x
        self.w_x_shape = (self.tc_model.Bx, self.tc_model.R)
        self.w_t = self.tc_model.w_t
        self.w_t_shape = (self.tc_model.Bt, self.tc_model.R)

        # Compute the log probability and its gradients, taking into
        # account the prior and the likelihood of any consumers of the
        # location.
        self.log_p = self.tc_model.log_p
        self.g_log_p_wrt_wx = T.constant(0.)
        self.g_log_p_wrt_wt = T.constant(0.)
        self.g_log_p_wrt_wx += T.grad(self.tc_model.log_p, self.w_x)
        self.g_log_p_wrt_wt += T.grad(self.tc_model.log_p, self.w_t)

        self.log_lkhd = T.constant(0.0)
        self.g_log_lkhd_wrt_wx = T.constant(0.)
        self.g_log_lkhd_wrt_wt = T.constant(0.)

        from pyglm.components.bkgd import SharedTuningCurveStimulus
        if isinstance(population.glm.bkgd_model, SharedTuningCurveStimulus):
            self.log_lkhd += population.glm.ll
            self.g_log_lkhd_wrt_wx += T.grad(population.glm.ll, self.w_x)
            self.g_log_lkhd_wrt_wt += T.grad(population.glm.ll, self.w_t)

    def _precompute_vars(self, x):
        """ Precompute currents for sampling the stimulus filters
        """
        I_biases = []
        I_nets = []
        for n in range(self.population.N):
            nvars = self.population.extract_vars(x, n)
            I_biases.append(seval(self.glm.bias_model.I_bias,
                                  self.syms,
                                  nvars))
            I_nets.append(seval(self.glm.I_net,
                                self.syms,
                                nvars))
        return I_biases, I_nets

    def _lp(self, x, I_biases, I_nets):
        """
        Compute the log posterior of x (across all GLMs), substituting
        the precomputed bias and network currents for each neuron.
        """
        lp = seval(self.log_p, self.syms['latent'], x['latent'])
        for n in range(self.N):
            # Extract the glm parameters
            xn = self.population.extract_vars(x, n)
            s = \
                {
                    'I_net' : self.glm.I_net,
                    'I_bias' : self.glm.bias_model.I_bias,
                    'n' : self.glm.n,
                }
            s.update(self.syms['latent'])

            xv = \
                {
                    'I_net' : I_nets[n],
                    'I_bias' : I_biases[n],
                    'n' : n,
                }
            xv.update(xn['latent'])

            # Compute the GLM log likelihood for each data sequence
            for data in self.population.data_sequences:
                self.population.set_data(data)
                lp += seval(self.log_lkhd, s, xv)

        return lp

    def _lp_wx(self, w_x, x, I_biases, I_nets):
        """
        Log posterior as a function of the spatial weights w_x.
        """
        # Set w_x in state dict x
        set_vars('w_x', x['latent'][self.tc_model.name], w_x)
        return self._lp(x, I_biases, I_nets)

    def _lp_wt(self, w_t, x, I_biases, I_nets):
        """Log posterior as a function of the temporal weights w_t."""
        # Set w_t in state dict x
        set_vars('w_t', x['latent'][self.tc_model.name], w_t)
        return self._lp(x, I_biases, I_nets)

    def _grad_lp_wrt_wx(self, w_x, x, I_biases, I_nets):
        """Gradient of the log posterior with respect to w_x:
        prior gradient plus each neuron's likelihood gradient summed
        over all data sequences."""
        # Set w_x in state dict x
        set_vars('w_x', x['latent'][self.tc_model.name], w_x)
        g_lp = seval(self.g_log_p_wrt_wx, self.syms['latent'], x['latent'])
        for n in range(self.N):
            xn = self.population.extract_vars(x, n)
            s = \
                {
                    'I_net' : self.glm.I_net,
                    'I_bias' : self.glm.bias_model.I_bias,
                    'n' : self.glm.n,
                }
            s.update(self.syms['latent'])

            xv = \
                {
                    'I_net' : I_nets[n],
                    'I_bias' : I_biases[n],
                    'n' : n,
                }
            xv.update(xn['latent'])

            # Compute the GLM log likelihood for each data sequence
            for data in self.population.data_sequences:
                self.population.set_data(data)
                g_lp += seval(self.g_log_lkhd_wrt_wx, s, xv)

        return g_lp

    def _grad_lp_wrt_wt(self, w_t, x, I_biases, I_nets):
        """Gradient of the log posterior with respect to w_t (same
        structure as _grad_lp_wrt_wx)."""
        # Set w_t in state dict x
        set_vars('w_t', x['latent'][self.tc_model.name], w_t)
        g_lp = seval(self.g_log_p_wrt_wt, self.syms['latent'], x['latent'])
        for n in range(self.N):
            xn = self.population.extract_vars(x, n)
            s = \
                {
                    'I_net' : self.glm.I_net,
                    'I_bias' : self.glm.bias_model.I_bias,
                    'n' : self.glm.n,
                }
            s.update(self.syms['latent'])

            xv = \
                {
                    'I_net' : I_nets[n],
                    'I_bias' : I_biases[n],
                    'n' : n,
                }
            xv.update(xn['latent'])

            # Compute the GLM log likelihood for each data sequence
            for data in self.population.data_sequences:
                self.population.set_data(data)
                g_lp += seval(self.g_log_lkhd_wrt_wt, s, xv)

        return g_lp

    def update(self, x):
        """
        Sample w_x and w_t with two HMC moves given the other variables.

        NOTE(review): step_sz and avg_accept_rate are shared between the
        w_x and w_t moves, so each move continues adapting the other's
        step size -- confirm this sharing is intended.
        """
        if self.tc_model is None:
            return

        # Precompute other currents
        I_biases, I_nets = self._precompute_vars(x)

        # Update w_x: negative log posterior and its gradient as flat
        # vectors, as required by the hmc helper.
        nll_wx = lambda w_x: -1.0 * self._lp_wx(w_x.reshape(self.w_x_shape), x, I_biases, I_nets)
        grad_nll_wx = lambda w_x: -1.0 * self._grad_lp_wrt_wx(w_x.reshape(self.w_x_shape), x, I_biases, I_nets).ravel()

        # Automatically tune these parameters
        (w_x, new_step_sz, new_accept_rate) = hmc(nll_wx,
                                                  grad_nll_wx,
                                                  self.step_sz,
                                                  self.n_steps,
                                                  x['latent'][self.tc_model.name]['w_x'].ravel(),
                                                  adaptive_step_sz=True,
                                                  avg_accept_rate=self.avg_accept_rate)

        # Update step size and accept rate
        self.step_sz = new_step_sz
        self.avg_accept_rate = new_accept_rate

        # Update current w_x
        x['latent'][self.tc_model.name]['w_x'] = w_x.reshape(self.w_x_shape)

        # Do the same for w_t
        nll_wt = lambda w_t: -1.0 * self._lp_wt(w_t.reshape(self.w_t_shape), x, I_biases, I_nets)
        grad_nll_wt = lambda w_t: -1.0 * self._grad_lp_wrt_wt(w_t.reshape(self.w_t_shape), x, I_biases, I_nets).ravel()

        # Automatically tune these parameters
        (w_t, new_step_sz, new_accept_rate) = hmc(nll_wt,
                                                  grad_nll_wt,
                                                  self.step_sz,
                                                  self.n_steps,
                                                  x['latent'][self.tc_model.name]['w_t'].ravel(),
                                                  adaptive_step_sz=True,
                                                  avg_accept_rate=self.avg_accept_rate)

        # Update step size and accept rate
        self.step_sz = new_step_sz
        self.avg_accept_rate = new_accept_rate

        # Update current w_t
        x['latent'][self.tc_model.name]['w_t'] = w_t.reshape(self.w_t_shape)
        return x
def initialize_updates(population):
    """ Compute the set of updates required for the given population.

    Returns (serial_updates, parallel_updates): serial updates touch
    shared latent state and must run one at a time; parallel updates can
    be applied per neuron.

    TODO: Figure out how to do this in a really principled way.
    """
    serial_updates = []
    parallel_updates = []

    print "Initializing latent variable samplers"
    # NOTE(review): the message below is stale -- the shared tuning curve
    # sampler IS created and appended immediately after it.
    print "Ignoring shared tuning curve update"
    tc_sampler = SharedTuningCurveUpdate()
    tc_sampler.preprocess(population)
    serial_updates.append(tc_sampler)

    loc_sampler = LatentLocationUpdate()
    loc_sampler.preprocess(population)
    serial_updates.append(loc_sampler)

    type_sampler = LatentTypeUpdate()
    type_sampler.preprocess(population)
    serial_updates.append(type_sampler)

    # All populations have a parallel GLM sampler
    print "Initializing GLM samplers"
    bias_sampler = HmcBiasUpdate()
    bias_sampler.preprocess(population)
    parallel_updates.append(bias_sampler)

    bkgd_sampler = HmcBkgdUpdate()
    bkgd_sampler.preprocess(population)
    parallel_updates.append(bkgd_sampler)

    # Choose the impulse sampler that matches the impulse response model
    from pyglm.components.impulse import DirichletImpulses
    if isinstance(population.glm.imp_model, DirichletImpulses):
        imp_sampler = HmcDirichletImpulseUpdate()
    else:
        imp_sampler = HmcImpulseUpdate()
    imp_sampler.preprocess(population)
    parallel_updates.append(imp_sampler)

    # All populations have a network sampler
    print "Initializing network sampler"
    net_sampler = CollapsedGibbsNetworkColumnUpdate()
    net_sampler.preprocess(population)
    parallel_updates.append(net_sampler)

    return serial_updates, parallel_updates
def gibbs_sample(population,
N_samples=1000,
x0=None,
init_from_mle=True,
callback=None):
"""
Sample the posterior distribution over parameters using MCMC.
"""
N = population.model['N']
dt = population.model['dt']
# Draw initial state from prior if not given
if x0 is None:
x0 = population.sample()
if init_from_mle:
print "Initializing with coordinate descent"
from pyglm.models.model_factory import make_model, convert_model
from pyglm.population import Population
mle_model = make_model('standard_glm', N=N, dt=dt)
mle_popn = Population(mle_model)
for data in population.data_sequences:
mle_popn.add_data(data)
mle_x0 = mle_popn.sample()
# Initialize with MLE under standard GLM
mle_x0 = coord_descent(mle_popn, x0=mle_x0, maxiter=1)
# Convert between inferred parameters of the standard GLM
# and the parameters of this model. Eg. Convert unweighted
# networks to weighted networks with normalized impulse responses.
x0 = convert_model(mle_popn, mle_model, mle_x0, population, population.model, x0)
# # TODO: Move this to a better place
# from pyglm.inference.smart_init import initialize_locations_by_correlation
# initialize_locations_by_correlation(population, x0)
# Create updates for this population
serial_updates, parallel_updates = initialize_updates(population)
# DEBUG Profile the Gibbs sampling loop
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# Alternate fitting the network and fitting the GLMs
x_smpls = [x0]
x = x0
import time
start_time = time.time()
for smpl in np.arange(N_samples):
# Call the callback
if callback is not None:
callback(x)
# Print the current log likelihood
lp = population.compute_log_p(x)
# Compute iters per second
stop_time = time.time()
if stop_time - start_time == 0:
print "Gibbs iteration %d. Iter/s exceeds time resolution. Log prob: %.3f" % (smpl, lp)
else:
print "Gibbs iteration %d. Iter/s = %f. Log prob: %.3f" % (smpl,
1.0/(stop_time-start_time),
lp)
start_time = stop_time
# Go through each parallel MH update
for parallel_update in parallel_updates:
for n in np.arange(N):
# print "Parallel update: %s for neuron %d" % (str(type(parallel_update)), n)
parallel_update.update(x, n)
# Sample the serial updates
for serial_update in serial_updates:
# print "Serial update: ", type(serial_update)
serial_update.update(x)
x_smpls.append(copy.deepcopy(x))
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
with open('mcmc.prof.txt', 'w') as f:
f.write(s.getvalue())
f.close()
return x_smpls
| 2.609375 | 3 |
app.py | abhipatel12/Rubiks-Cube | 0 | 12761340 | <reponame>abhipatel12/Rubiks-Cube
# import pygame and initialize packages
import pygame

# importing pygame.locals for key coordinates
from pygame.locals import (
    K_ESCAPE,
    KEYDOWN,
    QUIT,
    K_f,
    K_r,
    K_u,
    K_l,
    K_b,
    K_d,
    KMOD_SHIFT
)

# initialize pygame
pygame.init()

# setting up screen constants (width and height)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600

# create screen object
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

# Map each face key to (clockwise message, counter-clockwise message).
# A dispatch table replaces the long if/elif chain while printing
# exactly the same output for every key combination.
ROTATION_KEYS = {
    K_f: ("F KEY", "SHIFT + F KEY"),
    K_r: ("R KEY", "SHIFT + R KEY"),
    K_u: ("U KEY", "SHIFT + U KEY"),
    K_l: ("L KEY", "SHIFT + L KEY"),
    K_b: ("B KEY", "SHIFT + B KEY"),
    K_d: ("D KEY", "SHIFT + D KEY"),
}

# variable to keep game loop running
running = True

# Main Game Loop
while running:
    # loop that looks at every event in the queue
    for event in pygame.event.get():
        if event.type == KEYDOWN:  # check if user hit a key
            if event.key == K_ESCAPE:  # escape key -> stop loop
                running = False
            elif event.key in ROTATION_KEYS:
                plain_msg, shift_msg = ROTATION_KEYS[event.key]
                # getting mods for shift + key (SHIFT means counter rotation)
                mods = pygame.key.get_mods()
                print(shift_msg if mods & KMOD_SHIFT else plain_msg)
        elif event.type == QUIT:  # check if user clicked window close button
            running = False

# quit
pygame.quit()
| 3.53125 | 4 |
modules/db_worker/DbWorker.py | navicore/oemap | 0 | 12761341 | #!/usr/bin/python2
"""worker to read presence records from redis and write to mongodb"""
from pymongo import MongoClient
import syslog
import redis
import json
import time
import datetime
from argparse import ArgumentParser
#from riemann import RiemannClient, RiemannUDPTransport
#rmmonitor = RiemannClient(transport = RiemannUDPTransport,
#host=config.riemann['host'])
#ok_response = {'status': 'ok'}
INQNAME = "oemap_db_worker_in_queue"
REPLYTO = "oemap_www_nodejs_in_queue"
class DbWorker():
def __init__ (self):
parser = ArgumentParser()
parser.add_argument('-n', '--job', dest='job', action='store',
help='worker instance id')
self.args = parser.parse_args()
self.rhost = "127.0.0.1"
self.rport = 6379
self.starttime = datetime.datetime.now()
self.statc = 0
self.stati = 0
self.database = None
def stats(self):
self.statc = self.statc + 1
self.stati = self.stati + 1
if self.stati == 10000:
now = datetime.datetime.now()
dur = now - self.starttime
rate = ''
if dur.seconds > 0:
rate = str(self.stati / dur.seconds) + " per second"
else:
rate = "1000+ per second"
self.log_notice("processed %s records. rate was %s." %
(self.statc, rate))
self.stati = 0
self.starttime = now
FIVE_MIN_IN_SECS = 60 * 5
ONE_HOUR_IN_SECS = 60 * 60
ONE_DAY_IN_SECS = ONE_HOUR_IN_SECS * 24
def setExpireTime(self, rec):
now = datetime.datetime.now()
ttl = rec['ttl']
if ttl == 1:
rec['exp_time'] = now + datetime.timedelta(0, FIVE_MIN_IN_SECS)
elif ttl == 2:
rec['exp_time'] = now + datetime.timedelta(0, ONE_HOUR_IN_SECS)
elif ttl == 3:
rec['exp_time'] = now + datetime.timedelta(0, ONE_DAY_IN_SECS)
else:
rec['exp_time'] = now # ready for sweeper
def run (self):
while True:
try:
self.log_notice('%s Python impl starting queue %s' % ("test", INQNAME))
rdis = redis.Redis(host=self.rhost, port=self.rport)
client = MongoClient()
self.database = client.oemap_test
while True:
(_, msg) = rdis.brpop(keys=[INQNAME], timeout=600)
if msg == None:
continue
rec = json.loads(msg)
self.log_debug("updating %s for %s" % (rec['_id'],
rec['label']))
self.setExpireTime(rec)
self.database.presences.save(rec)
self.stats()
except Exception:
self.handle_exception()
time.sleep(1)
except: # catch *all* exceptions
self.handle_exception()
time.sleep(1)
def log_debug (self, msg):
syslog.syslog(syslog.LOG_DEBUG, "%s %s" % (self.args.job, msg))
def log_notice (self, msg):
syslog.syslog(syslog.LOG_NOTICE, "%s %s" % (self.args.job, msg))
def log_error (self, msg):
syslog.syslog(syslog.LOG_ERR, "%s %s" % (self.args.job, msg))
def handle_exception(self):
import traceback
formatted_lines = traceback.format_exc().splitlines()
for line in formatted_lines:
self.log_error(line)
# Entry point: run the worker when executed as a script.
if __name__ == "__main__":
    DbWorker().run()
| 2.671875 | 3 |
server/routines/api.py | ni4ka7a/gym-track | 0 | 12761342 | from routines.models import Routine
from rest_framework import viewsets, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from .serializers import RoutineSerializer
class RoutineViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for a user's workout routines.

    Routines are scoped to the authenticated user: the queryset only
    returns the requester's own routines, and newly created routines are
    stamped with the requester as author. (Dead commented-out code from
    earlier experiments was removed.)
    """
    permission_classes = [
        permissions.IsAuthenticated,
    ]
    serializer_class = RoutineSerializer

    def get_queryset(self):
        """Limit results to routines owned by the requesting user."""
        return self.request.user.routines.all()

    def perform_create(self, serializer):
        """Save a new routine with the requesting user as its author."""
        serializer.save(author=self.request.user)
src/utils/crypto.py | biobdeveloper/bithonledger | 6 | 12761343 | """Cryptography module.
Encrypt and decrypt user's Bitcoin WIFs.
"""
import rncryptor
from base64 import b64encode, b64decode
def encrypt(wif, password):
    """Encrypt a Bitcoin WIF with *password*.

    Returns the RNCryptor ciphertext encoded as a base64 UTF-8 string so
    it can be stored as plain text.
    """
    ciphertext = rncryptor.RNCryptor().encrypt(data=wif, password=password)
    return b64encode(ciphertext).decode('utf-8')
def decrypt(enc_wif, password):
    """Decrypt a base64-encoded WIF ciphertext produced by ``encrypt``."""
    raw = b64decode(enc_wif.encode('utf-8'))
    return rncryptor.RNCryptor().decrypt(raw, password=password)
| 3.46875 | 3 |
orbit_prediction/orbit_prediction/pred_physics_err.py | lahorite/spacetech-ssa | 1 | 12761344 | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import itertools
import numpy as np
import pandas as pd
import xgboost as xgb
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, train_test_split
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
def eval_models(models, data):
    """Calculates the root mean squared error (RMSE) and the coefficient of
    determination (R^2) for each of the models.

    :param models: Dictionary of the error model for each state vector
        component
    :type models: {str: xgboost.XGBRegressor}
    :param data: Dictionary containing the training and test datasets
    :type data: {str: numpy.array}
    :return: Returns a DataFrame containing the evaluation metric results
    :rtype: pandas.DataFrame
    """
    def _score(component, model):
        # Evaluate one component model on the held-out test split.
        predictions = model.predict(data['X_test'])
        truth = data['y_test'][component]
        return {
            'Error': component,
            'RMSE': metrics.mean_squared_error(truth, predictions,
                                               squared=False),
            'R^2': metrics.r2_score(truth, predictions),
        }

    return pd.DataFrame([_score(component, model)
                         for component, model in models.items()])
def plot_feat_impts(models, data):
    """Plots the feature importances for each of the error models.
    For use in an interactive jupyter session.

    :param models: Dictionary of the error model for each state vector
        component
    :type models: {str: xgboost.XGBRegressor}
    :param data: Dictionary containing the training and test datasets
    :type data: {str: numpy.array}
    """
    feature_names = data['X_train'].columns
    fig, axes = plt.subplots(2, 3, figsize=(10, 10))
    # One subplot per component model, importances sorted descending.
    for (component, model), axis in zip(models.items(), axes.flat):
        importances = pd.Series(model.feature_importances_,
                                index=feature_names)
        importances.sort_values(ascending=False, inplace=True)
        importances.plot(kind='bar', ax=axis, title=component)
    plt.ylabel('Feature Importance Score')
    plt.tight_layout()
def get_state_vect_cols(prefix):
    """Get the column names of the state vector components with the
    provided `prefix`.

    :param prefix: The prefix that is used in front of the state vector
        components in the column names, examples are `physics_pred` and
        `physics_err`
    :type prefix: str
    :return: A list of the 6 names of the prefixed state vector components
        (position r then velocity v, each with x/y/z components)
    :rtype: [str]
    """
    return [f'{prefix}_{vect}_{comp}'
            for vect in ('r', 'v')
            for comp in ('x', 'y', 'z')]
def load_models(models_dir):
    """Loads previously trained XGBoost models from the `models_dir`

    :param models_dir: The path to where the serialized XGBoost JSON files are
    :type models_dir: str
    :return: A list of the loaded XGBoost models, one per physics-error
        state vector component
    :rtype: [xgboost.XGBRegressor]
    """
    loaded = []
    for model_name in get_state_vect_cols('physics_err'):
        regressor = xgb.XGBRegressor()
        regressor.load_model(os.path.join(models_dir, f'{model_name}.json'))
        loaded.append(regressor)
    return loaded
def save_models(models, models_dir):
    """Saves the error estimation models as JSON representations.

    :param models: Dictionary of the error model for each state vector
        component
    :type models: {str: xgboost.XGBRegressor}
    :param models_dir: The path to save the serialized XGBoost JSON files to
    :type models_dir: str
    """
    for name, model in models.items():
        # Each model is written as <models_dir>/<component_name>.json
        model.save_model(os.path.join(models_dir, f'{name}.json'))
def predict_err(models, physics_preds):
    """Uses the provided ML models to predict the error in the physics
    model orbit prediction.

    :param models: The ML models to use to estimate the error in each
        of the predicted state vector components.
    :type models: [xgboost.XGBRegressor]
    :param physics_preds: The elapsed time in seconds and the predicted
        state vectors to estimate the errors for
    :type physics_preds: numpy.array
    :return: The estimated errors
    :rtype: numpy.array
    """
    # Each model produces the error estimate for one state vector component.
    per_component = [model.predict(physics_preds) for model in models]
    # Stack along axis 1 so each component becomes a column.
    return np.stack(per_component, axis=1)
def build_train_test_sets(df, test_size=0.2):
    """Builds training and testing sets from the provided DataFrame.

    :param df: The DataFrame to use to build training and test sets from
    :type df: pandas.DataFrame
    :param test_size: The percentage size of the DataFrame that should be used
        to build the test set
    :type test_size: float
    :return: A dictionary containing the feature and target training/test sets
    :rtype: dict[str, pandas.DataFrame]
    """
    # Features: the prediction horizon in seconds plus the physics-model
    # predicted state vector.
    X = df[['elapsed_seconds'] + get_state_vect_cols('physics_pred')]
    # Targets: the errors between the physics-model predictions and the
    # ground-truth observations.
    y = df[get_state_vect_cols('physics_err')]
    # train_test_split returns (X_train, X_test, y_train, y_test) in order.
    splits = train_test_split(X, y, test_size=test_size)
    return dict(zip(['X_train', 'X_test', 'y_train', 'y_test'], splits))
def train_models(data, params=None, eval_metric='rmse'):
    """Trains gradient boosted regression tree models to estimate the error in
    each of the six state vector components in the physical model prediction

    :param data: Dictionary containing the training and test datasets
    :type data: {str: numpy.array}
    :param params: A dictionary of parameters to pass to the XGBRegressor
        constructor, overriding the GPU-enabled defaults.  ``None`` (the
        default) keeps the defaults unchanged.
    :type params: dict
    :param eval_metric: The loss function to use in model training
    :type eval_metric: str
    :return: Dictionary containing the trained models for each state vector
        component
    :rtype: {str: xgboost.XGBRegressor}
    """
    # Default to GPU-accelerated histogram training; callers override
    # any of these (e.g. `tree_method: hist` for CPU-only hosts).
    default_params = {
        'booster': 'gbtree',
        'tree_method': 'gpu_hist',
        'gpu_id': 0
    }
    # `params=None` instead of a `params={}` default: a shared mutable
    # default dict would let one caller's mutations leak into later calls.
    if params is not None:
        default_params.update(params)
    X, ys = data['X_train'], data['y_train']
    models = {}
    # Train one independent regressor per target (state vector component).
    for target_col in ys.columns:
        y = ys[target_col]
        reg = xgb.XGBRegressor(**default_params)
        reg.fit(X, y, eval_metric=eval_metric)
        models[target_col] = reg
    return models
if __name__ == '__main__':
    # Command line interface: train the per-component error models from a
    # parquet file of physics predictions and serialize them to a directory.
    parser = argparse.ArgumentParser(
        description=('Train baseline XGBoost models to estimate physical '
                     'prediction error'),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--input_path',
        help=('The path to the parquet file containing the physical model '
              'prediction training data'),
        type=str,
        required=True
    )
    parser.add_argument(
        '--use_gpu',
        help='Use a GPU in model training',
        action='store_true'
    )
    parser.add_argument(
        '--out_dir',
        help=('The directory to serialize the models to'),
        type=str,
        required=True
    )
    args = parser.parse_args()
    logger.info('Loading physical model orbit prediction training data...')
    physics_pred_df = pd.read_parquet(args.input_path)
    logger.info('Building training and test sets...')
    train_test_data = build_train_test_sets(physics_pred_df)
    # The train_models defaults are GPU-enabled (`gpu_hist`); on CPU-only
    # hosts override the tree method with the CPU `hist` implementation.
    if args.use_gpu:
        params = {}
    else:
        params = {'tree_method': 'hist'}
    logger.info('Training Error Models...')
    err_models = train_models(train_test_data, params=params)
    logger.info(eval_models(err_models, train_test_data))
    logger.info('Serializing Error Models...')
    save_models(err_models, args.out_dir)
| 2.515625 | 3 |
2019/bon_appetit.py | Akhilkokani/Solving-Hackerrank-Problem-Statements | 0 | 12761345 | <gh_stars>0
# Problem Statement Link: https://www.hackerrank.com/challenges/bon-appetit/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the bonAppetit function below.
def bonAppetit(bill, k, b):
    """Print how much Anna was overcharged, or "Bon Appetit" if the split
    was fair (HackerRank "Bon Appetit").

    Anna did not eat item `k`, so her fair share is half the bill with
    that item excluded.

    :param bill: prices of the ordered items
    :param k: index of the item Anna did not eat
    :param b: the amount Anna was actually charged
    """
    # Sum the shared items without mutating the caller's list (the
    # original `bill.pop(k)` destructively removed the item).
    fair_share = (sum(bill) - bill[k]) / 2
    overcharge = b - fair_share
    if overcharge == 0:
        print("Bon Appetit")
    else:
        print(int(overcharge))
if __name__ == '__main__':
    # First input line: `n` (item count, unused) and `k` (index Anna skipped).
    nk = input().rstrip().split()
    n = int(nk[0])
    k = int(nk[1])
    # Second line: the price of each ordered item.
    bill = list(map(int, input().rstrip().split()))
    # Third line: the amount Anna was actually charged.
    b = int(input().strip())
    bonAppetit(bill, k, b)
| 3.765625 | 4 |
imgtoascii/__init__.py | yasserbdj96/imgtoascii | 0 | 12761346 | #!/usr/bin/env python
# coding:utf-8
# Code by : <NAME>
# E-mail : <EMAIL>
"""
#set:usage.py,examples.py,changelog.txt
##################################################################
# USAGE :
#s
from imgtoascii import imgtoascii
imgtoascii("<IMAGE_PATH>","<OPTION>").view()
#e
##################################################################
# EXAMPLES :
#s
from imgtoascii import imgtoascii
# Example:1
imgtoascii("test.png").view()
# Example:2
p1=imgtoascii("test.png",False).view()
for i in range(len(p1)):
print(p1[i])
#e
##################################################################
# CHANGELOG :
#s
## 0.0.2
- Fix Bugs.
## 0.0.1
- First public release.
#e
##################################################################
"""
# VALUES :
# Package metadata constants, consumed by the project's setup tooling.
__version__="0.0.2"
# NOTE(review): assigning __name__ shadows the module's real name
# ("__main__" when run directly) -- confirm the setup tooling needs this.
__name__="imgtoascii"
__author__="<NAME> (<NAME>)"
__author_email__="<EMAIL>"
__github_user_name__="yasserbdj96"
__title__="image to ascii."
__description__="Convert images to ascii."
__author_website__=f"https://{__github_user_name__}.github.io/"
__source_code__=f"https://github.com/{__github_user_name__}/{__name__}"
# Keywords: the author's handle, 'python', plus every word of the title
# and description.
__keywords__=[__github_user_name__,'python']
__keywords__.extend(__title__.split(" "))
__keywords__.extend(__description__.split(" "))
__install_requires__=['pipincluder']
__Installation__="pip install "+__name__+"=="+__version__
__license__='MIT License'
__copyright__='Copyright © 2008->Present, '+__author__+"."
__license_text__=f'''MIT License
{__copyright__}
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
You also agree that if you become very rich you will give me 1% of your wealth.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
##################################################################
#s
from pipincluder import pipincluder
# Import third-party packages (PIL, hexor) via pipincluder, which generates
# the import statements (installing missing packages as needed).
# NOTE(review): `exec` on generated code runs arbitrary strings; plain
# `import` statements would be safer -- confirm pipincluder is trusted.
exec(pipincluder("from PIL import Image",
                 "from hexor import hexor").modules())
#start imgtoascii class:
class imgtoascii:
    """Convert an image into colored "ASCII" art made of block characters.

    Each pixel becomes a pair of full-block characters colored (via hexor
    ANSI escapes) with that pixel's RGB value.
    """
    #__init__:
    def __init__(self,img,oki=True):
        """Load `img` (a path accepted by PIL) and build the art rows.

        :param img: path to the image file
        :param oki: when True, view() prints the art; when False it
            returns the list of row strings instead
        """
        self.oki=oki
        # Two full-block characters per pixel keeps roughly square aspect.
        art="██"
        im=Image.open(img)
        width,height=im.size
        pixels=list(im.getdata())
        allpi=[]
        linepi=[]
        k=1
        # hexor(True, "rgb"): return colored text using RGB color specs.
        p1=hexor(True,"rgb")
        for i in range(len(pixels)):
            if k<width:
                # Foreground and background both set to the pixel color.
                linepi.append(p1.c(art,f"{pixels[i][0]},{pixels[i][1]},{pixels[i][2]}",f"{pixels[i][0]},{pixels[i][1]},{pixels[i][2]}"))
            elif k==width:
                # Row complete: flush it and start a new one.
                # NOTE(review): the pixel at k==width is never appended and
                # the final row is never flushed after the loop, so each row
                # appears one pixel short and the last row is dropped --
                # looks like an off-by-one; confirm against sample output.
                allpi.append(linepi)
                linepi=[]
                k=0
            k+=1
        self.allpi=allpi
    #view:
    def view(self):
        """Render the art: print it when `oki` is True, otherwise return
        the list of row strings (and implicitly return None when printing).
        """
        allart=self.allpi
        image_art=[]
        # Join each row's per-pixel colored cells into one printable line.
        for i in range(len(allart)):
            line=""
            for j in range(len(allart[i])):
                line=line+allart[i][j]
            image_art.append(line)
        if self.oki==True:
            for i in range(len(image_art)):
                print(image_art[i])
        else:
            return image_art
#e | 2.453125 | 2 |
script/bootstrap.py | samuel100u/oneVPL-cpu | 0 | 12761347 | ###############################################################################
# Copyright (C) Intel Corporation
#
# SPDX-License-Identifier: MIT
###############################################################################
"""Build oneVPL-cpu ffmpeg dependencies"""
from io import BytesIO
import sys
import os
import posixpath
import argparse
import subprocess
import shutil
import time
import multiprocessing
import urllib.request
import zipfile
import ssl
from pathlib import Path
from os import environ
from contextlib import contextmanager
# Component Versions
# Pinned upstream versions/tags for each third-party component we build.
SVT_HEVC_VERSION = '1.5.1'
SVT_AV1_VERSION = 'v0.8.6'  # v0.8.7 is missing AVC support
DAV1D_VERSION = '0.9.0'
X264_VERSION = 'stable'
FFMPEG_VERSION = 'n4.4'
# Folder this script is in
SCRIPT_PATH = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Number of CPU cores to try to use in parallel
CPU_COUNT = multiprocessing.cpu_count()
# Flag indicating if verbose (debug) loging should be output
VERBOSE = 'VERBOSE' in os.environ
if VERBOSE:
    # VERBOSE may name a log file; '' or '-' selects stdout instead.
    if os.environ['VERBOSE'] not in ['', '-']:
        # pylint: disable=consider-using-with
        # Deliberately left open for the life of the process.
        VERBOSE_FILE = open(os.environ['VERBOSE'], 'w')
    else:
        VERBOSE_FILE = sys.stdout
# Prefixes used when echoing shell-equivalent commands/comments to the
# verbose log: batch-file style on Windows, shell style elsewhere.
if os.name == 'nt':
    VERBOSE_CMD = '::'
    VERBOSE_CMT = '@REM'
else:
    VERBOSE_CMD = '# $'
    VERBOSE_CMT = '#'
# Optional dictionary with environment options for Git
# mostly used to set an alternate PATH
GIT_ENV = None
# indicate if we prefer to clone, or to download archives
PREFER_CLONE = False
def _escape_cmd_arg(arg):
"""quote/escape and argument for a command line call so that it can
be safely used even if it has special charaters"""
arg = str(arg)
if ' ' in arg or '"' in arg:
return '"' + arg.replace('"', '""') + '"'
return arg
def log(message):
    """Record a shell-equivalent command line in the verbose log."""
    if not VERBOSE:
        return
    VERBOSE_FILE.write(f"{VERBOSE_CMD} {message}\n")
    VERBOSE_FILE.flush()
def log_comment(message):
    """Record an explanatory comment in the verbose log."""
    if not VERBOSE:
        return
    VERBOSE_FILE.write(f"{VERBOSE_CMT} {message}\n")
    VERBOSE_FILE.flush()
def to_posix_path(path):
    """Convert a Windows path to its POSIX (MinGW) equivalent.

    On non-Windows hosts, or for an empty path, the value is returned
    untouched.  On Windows a drive prefix like ``C:`` becomes ``/c`` and
    backslash separators become forward slashes.
    """
    if os.name != 'nt' or not path:
        return path
    parts = path.split('\\')
    drive = parts[0]
    # Map a drive specifier ("C:") onto MinGW's "/c" mount convention.
    if len(drive) == 2 and drive.endswith(":"):
        parts[0] = "/" + drive[:-1].lower()
    return posixpath.join(*parts)
def set_env(name, value):
    """Set an environment variable, echoing the shell equivalent to the log."""
    if os.name == 'nt':
        shell_line = f'set {name}={value}'
    else:
        shell_line = f'export {name}="{value}"'
    log(shell_line)
    os.environ[name] = value
def replace(target, old_str, new_str):
    """Replace every occurrence of `old_str` with `new_str` in file `target`."""
    log_comment(f'replace "{old_str}" with "{new_str}" in {target}')
    # Echo the platform's equivalent one-liner to the verbose log.
    if os.name == 'nt':
        log(f'powershell -Command "(gc {target}) -replace \'{old_str}\', \'{new_str}\' '
            + f'| Out-File -encoding utf8 {target}"')
    else:
        log(f'sed -i \'s/{old_str}/{new_str}/\' {target}')
    with open(target, "r") as src:
        updated = src.read().replace(old_str, new_str)
    with open(target, "w") as dst:
        dst.write(updated)
@contextmanager
def pushd(*dst):
    """Temporarily change the working directory; restore it on exit."""
    previous = os.getcwd()
    destination = os.path.join(previous, *dst)
    os.chdir(destination)
    log(f'pushd {destination}')
    try:
        yield
    finally:
        log('popd')
        log_comment(f' -> {previous}')
        os.chdir(previous)
#pylint: disable=invalid-name
def rm(target):
    """Delete a file or folder, retrying for up to 10 seconds.

    Does nothing when `target` does not exist.  Retries are needed mainly
    on Windows, where deletes can transiently fail with PermissionError
    (e.g. antivirus or indexing still holding a handle).
    """
    if os.path.exists(target):
        # Delete sometimes fails if done immediately, timeout
        # is not great, but allows filesystem settings to stabilize.
        timeout = time.time() + 10
        while time.time() < timeout:
            try:
                if os.path.isfile(target):
                    if os.name == 'nt':
                        log(f'del {target}')
                    else:
                        log(f'rm {target}')
                    os.remove(target)
                    break
                if os.path.isdir(target):
                    if os.name == 'nt':
                        log(f'rd /s /q {target}')
                    else:
                        log(f'rm -rf {target}')
                    shutil.rmtree(target)
                    break
            except PermissionError:
                # Back off briefly and retry until the timeout expires.
                time.sleep(1)
def mkdir(target):
    """Make a folder (with parents), echoing the shell equivalent."""
    if not target or os.path.exists(target):
        return
    if os.name == 'nt':
        log(f'md {target}')
    else:
        log(f'mkdir -p {target}')
    os.makedirs(target)
    # Rarely there is a bit of async delay in filesystem changes.
    # If a user script deleted this folder just before running this
    # script we may need to wait a moment to see the folder created.
    if not os.path.exists(target):
        time.sleep(2)
def join_command(command):
    """Join a series of parameters into one command line, escaping as needed."""
    return ' '.join(map(_escape_cmd_arg, command))
def cmd(*args, shell=None, no_throw=False, env=None, xenv=None):
    """Run a command, streaming its output to the console.

    :param args: either one pre-joined command string, or individual
        arguments that will be escaped and joined
    :param shell: 'bash' to wrap the command in bash; defaults to bash on
        non-Windows hosts
    :param no_throw: when True, return the exit code instead of raising
        on failure
    :param env: full replacement environment for the child process
    :param xenv: extra environment entries merged over `env` (or over a
        copy of the current environment)
    :return: the child process return code
    :raises Exception: when the command fails and `no_throw` is False
    """
    if len(args) == 1:
        command = args[0]
    else:
        command = join_command(args)
    if env is not None:
        log_comment('Using custom environment for next command')
    if xenv is not None:
        if env is None:
            env = os.environ.copy()
        env.update(xenv)
        for name in xenv:
            log_comment(f'Using "{name}={xenv[name]}" for next command')
    exec_cmd = command
    if shell is None and os.name != 'nt':
        shell = 'bash'
    if shell == 'bash':
        if os.name == 'nt':
            # In Windows bash is unexpected so we will record using it
            # as part of the verbose log
            command = f"bash -c '{command}'"
            exec_cmd = command
        else:
            # outside Windows we explicitly use bash, but we don't need
            # to worry about letting people know we are using it.
            exec_cmd = f"exec bash -c '{command}'"
    log(f'{command}')
    with subprocess.Popen(exec_cmd, shell=True, env=env) as proc:
        proc.communicate()
        if not no_throw and proc.returncode != 0:
            raise Exception(f"Error running command: {command}")
        return proc.returncode
def capture_cmd(*args, shell=None, log_errors=True, env=None, xenv=None):
    """Run a command and capture its output (mirrors `cmd`, but piped).

    :param args: either one pre-joined command string, or individual
        arguments that will be escaped and joined
    :param shell: 'bash' to wrap the command in bash; defaults to bash on
        non-Windows hosts
    :param log_errors: when True, echo captured stderr to this process's
        stderr
    :param env: full replacement environment for the child process
    :param xenv: extra environment entries merged over `env` (or over a
        copy of the current environment)
    :return: (stdout, stderr, returncode); never raises on failure
    """
    if len(args) == 1:
        command = args[0]
    else:
        command = join_command(args)
    if env is not None:
        log_comment('Using custom environment for next command')
    if xenv is not None:
        if env is None:
            env = os.environ.copy()
        env.update(xenv)
        for name in xenv:
            log_comment(f'Using "{name}={xenv[name]}" for next command')
    exec_cmd = command
    if shell is None and os.name != 'nt':
        shell = 'bash'
    if shell == 'bash':
        if os.name == 'nt':
            # In Windows bash is unexpected so we will record using it
            # as part of the verbose log
            command = f"bash -c '{command}'"
            exec_cmd = command
        else:
            # outside Windows we explicitly use bash, but we don't need
            # to worry about letting people know we are using it.
            exec_cmd = f"exec bash -c '{command}'"
    log(f'{command}')
    # universal_newlines=True gives text-mode (str) stdout/stderr.
    with subprocess.Popen(exec_cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          universal_newlines=True,
                          shell=True,
                          env=env) as proc:
        result = proc.communicate()
        if log_errors and result[1]:
            sys.stderr.write(result[1])
        return (result[0], result[1], proc.returncode)
class ZipFileWithPermissions(zipfile.ZipFile):
    """ZipFile class that handles file permissions.

    NOTE(review): this overrides the private CPython method
    `_extract_member`, which is not a stable API -- confirm it still
    exists on the Python versions this script supports.
    """
    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
        file on the path targetpath, preserving permissions.
        """
        if not isinstance(member, zipfile.ZipInfo):
            member = self.getinfo(member)
        targetpath = super()._extract_member(member, targetpath, pwd)
        # The high 16 bits of external_attr carry the Unix mode bits for
        # archives created on Unix; 0 means no permissions were recorded.
        attr = member.external_attr >> 16
        if attr != 0:
            os.chmod(targetpath, attr)
        return targetpath
def download_archive(url, path):
    """Download a zip archive from `url` and unpack it into folder `path`.

    The whole archive is buffered in memory before extraction, and Unix
    permissions are preserved via ZipFileWithPermissions.
    """
    if not os.path.exists(path):
        mkdir(path)
    log_comment(f"Downloading {url}")
    # bypassing ssl because we keep running into cases where certs have expired.
    # SECURITY: this disables certificate verification, so the download is
    # susceptible to man-in-the-middle tampering -- deliberate trade-off here.
    #pylint: disable=protected-access
    context = ssl._create_unverified_context()
    with urllib.request.urlopen(url, context=context) as webstream:
        with ZipFileWithPermissions(BytesIO(webstream.read())) as archfileobj:
            log_comment(f"Extracting {url} to {path} as zip file")
            archfileobj.extractall(path)
def is_repo_root(path):
    """Check if `path` is the root of a git working copy.

    Runs ``git rev-parse --git-dir`` in the current directory and compares
    the parent of the reported git dir with `path`.

    :param path: directory to test
    :return: True only when the command succeeds and `path` is the
        top level of the checkout
    """
    output, _, result = capture_cmd('git',
                                    'rev-parse',
                                    "--git-dir",
                                    xenv=GIT_ENV)
    log(result)
    log(output)
    # `git rev-parse` exits 0 only inside a repository.  The original
    # `result != 0` test inverted this, so an existing repo was never
    # detected and the caller re-ran `git init`/`commit` on every run.
    if result != 0:
        return False
    # Strip the trailing newline before building the parent path.
    git_dir = output.strip()
    return os.path.samefile(os.path.join(git_dir, ".."), path)
def main():
    """Main steps to build ffmpeg and dependencies.

    Parses the command line and delegates to `bootstrap`.
    """
    # Project root is the parent of the folder holding this script.
    proj_dir = str(Path(os.path.dirname(os.path.realpath(sys.argv[0]))).parent)
    parser = argparse.ArgumentParser(prog="bootstrap")
    parser.add_argument("--config",
                        '-m',
                        "--build_mode",
                        dest='build_mode',
                        choices=['Release', 'Debug'],
                        default='Release',
                        help='Build mode/configuration')
    parser.add_argument('-gpl',
                        "--use_gpl",
                        "--gpl",
                        dest='use_gpl',
                        action="store_true",
                        help='Use GPL codecs (ex: x264)')
    # 32-bit builds are only supported on Windows (MinGW).
    parser.add_argument(
        '-A',
        "--arch",
        dest='arch',
        choices=['x86_64', 'x86_32'] if os.name == 'nt' else ['x86_64'],
        default='x86_64',
        help='Target Architecture')
    parser.add_argument(
        '--clean',
        '-clean',
        dest='clean',
        action="store_true",
        help='Remove previous build/install dirs before starting')
    parser.add_argument('--validation',
                        dest='validation',
                        action="store_true",
                        help='Build validation binaries')
    # Unused argument for compatibility
    parser.add_argument('--bootstrap',
                        dest='bootstrap',
                        action="store_true",
                        help=argparse.SUPPRESS)
    args = parser.parse_args()
    bootstrap(args.clean, args.use_gpl, args.build_mode, proj_dir, args.arch,
              args.validation)
def make_mingw_path(arch):
    """Create the PATH value for building under MinGW/MSYS2.

    :param arch: target architecture, 'x86_64' or 'x86_32'
    :return: an os.pathsep-joined PATH string
    :raises Exception: when no MSYS installation can be located
    """
    fallback_msys_root = os.path.join('C:\\', 'tools', 'msys64')
    if 'MSYS_ROOT' in os.environ:
        msys_root = os.environ['MSYS_ROOT']
        print(f'MSYS_ROOT found: {msys_root}', file=sys.stderr)
    elif os.path.exists(fallback_msys_root):
        msys_root = fallback_msys_root
        print(f'MSYS_ROOT not found using msys at: {msys_root}',
              file=sys.stderr)
    else:
        # The original `raise 'MSys not found'` raised a bare string, which
        # is a TypeError in Python 3; raise a real exception instead.
        raise Exception('MSys not found')
    msys_usr_path = os.path.join(msys_root, 'usr')
    msys_usr_bin_path = os.path.join(msys_usr_path, 'bin')
    win_path = os.path.join('C:\\', 'Windows')
    win_sys_path = os.path.join(win_path, 'System32')
    mingw_path = []
    # 32-bit builds put the mingw32 toolchain first on PATH; the 64-bit
    # tools remain available afterwards for shared utilities.
    if arch == 'x86_32':
        mingw_path.append(os.path.join(msys_root, 'mingw32', 'bin'))
        mingw_path.append(
            os.path.join(msys_root, 'mingw32', 'i686-w64-mingw32', 'bin'))
    mingw_path.append(os.path.join(msys_root, 'mingw64', 'bin'))
    mingw_path.extend([
        os.path.join(msys_usr_path, 'local', 'bin'),
        msys_usr_bin_path,
        os.path.join(msys_root, 'bin'),
        win_sys_path,
        win_path,
        os.path.join(win_sys_path, 'Wbem'),
        os.path.join(win_sys_path, 'WindowsPowerShell', 'v1.0'),
        os.path.join(msys_usr_bin_path, 'site_perl'),
        os.path.join(msys_usr_bin_path, 'vendor_perl'),
        os.path.join(msys_usr_bin_path, 'core_perl'),
    ])
    return os.pathsep.join(mingw_path)
def make_git_path(mingw_path):
    """Create the PATH setting for Git invocations.

    MSYS git does not play well with other gits, so when the user already
    has git on their PATH we keep that PATH; otherwise fall back to the
    MinGW one.
    """
    user_path = os.environ['PATH']
    git_missing = shutil.which('git') is None
    return mingw_path if git_missing else user_path
#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def bootstrap(clean, use_gpl, build_mode, proj_dir, arch, validation):
    """Bootstrap install: build the codec dependencies, then ffmpeg.

    :param clean: remove previous build/install dirs before starting
    :param use_gpl: also build GPL codecs (x264)
    :param build_mode: 'Release' or 'Debug'
    :param proj_dir: project root; defaults for build/install dirs hang off it
    :param arch: target architecture ('x86_64' or, on Windows, 'x86_32')
    :param validation: enable extra ffmpeg components used by validation
    """
    if os.name == 'nt':
        #pylint: disable=global-statement
        global GIT_ENV
        mingw_path = make_mingw_path(arch)
        # Git gets its own PATH so the user's git (if any) is preferred
        # over the MSYS one.
        GIT_ENV = {'PATH': make_git_path(mingw_path)}
        # Don't update PATH with MinGW until we have figured out Git path
        set_env('PATH', mingw_path)
    # Scratch build dir and install prefix; overridable via environment.
    build_dir = os.path.join(proj_dir, '_extbuild')
    if "VPL_CPU_DEPS_BUILD_DIR" in os.environ:
        build_dir = environ.get("VPL_CPU_DEPS_BUILD_DIR")
    else:
        set_env('VPL_CPU_DEPS_BUILD_DIR', build_dir)
    install_dir = os.path.join(proj_dir, '_deps')
    if "VPL_BUILD_DEPENDENCIES" in os.environ:
        install_dir = environ.get("VPL_BUILD_DEPENDENCIES")
    else:
        set_env('VPL_BUILD_DEPENDENCIES', install_dir)
    # Let pkg-config find the codecs we are about to install.
    pkg_config_path = [os.path.join(install_dir, "lib", "pkgconfig")]
    if 'PKG_CONFIG_PATH' in os.environ:
        pkg_config_path.append(os.environ['PKG_CONFIG_PATH'])
    set_env('PKG_CONFIG_PATH', os.pathsep.join(pkg_config_path))
    if clean:
        rm(build_dir)
        rm(install_dir)
    mkdir(build_dir)
    mkdir(install_dir)
    with pushd(build_dir):
        #build dependencies
        # build_aom_av1_decoder(install_dir)
        if arch == 'x86_64':
            if use_gpl:
                build_gpl_x264_encoder(install_dir)
            build_dav1d_decoder(install_dir)
            build_svt_av1_encoder(install_dir, build_mode)
            build_svt_hevc_encoder(install_dir, build_mode)
        #prepare ffmpeg build
        version = FFMPEG_VERSION
        if os.path.exists(f'FFmpeg-{version}'):
            print("using existing ffmpeg dir")
        else:
            if PREFER_CLONE:
                cmd('git',
                    'clone',
                    '--depth=1',
                    '-b',
                    f'{version}',
                    'https://github.com/FFmpeg/FFmpeg',
                    f'FFmpeg-{version}',
                    xenv=GIT_ENV)
            else:
                download_archive(
                    f"https://github.com/FFmpeg/FFmpeg/archive/refs/tags/{version}.zip",
                    ".")
        with pushd(f'FFmpeg-{version}'):
            if not is_repo_root("."):
                # make this folder a git repo so we can use "git am" to apply patches
                cmd('git', 'init', xenv=GIT_ENV)
                cmd('git', 'add', '.', xenv=GIT_ENV)
                cmd('git',
                    '-c',
                    'user.name=bootstrap',
                    '-c',
                    'user.email=<EMAIL>@localhost',
                    'commit',
                    '-m',
                    'Import',
                    xenv=GIT_ENV)
            # Apply any local ffmpeg patches shipped next to this script.
            patch_path = os.path.join(SCRIPT_PATH, 'patches', 'ffmpeg')
            if os.path.exists(patch_path):
                for patch in os.scandir(patch_path):
                    if patch.is_file():
                        cmd('git',
                            '-c',
                            'user.name=bootstrap',
                            '-c',
                            'user.email=<EMAIL>@localhost',
                            'am',
                            patch.path,
                            xenv=GIT_ENV)
            configure_opts = []
            configure_opts.extend(
                ffmpeg_configure_opts(install_dir, arch, validation))
            if build_mode == "Debug":
                configure_opts.extend(ffmpeg_debug_configure_opts())
            configure_opts.extend(
                ffmpeg_3rdparty_configure_opts(build_dir, use_gpl))
            # run configure
            cmd('./configure', *configure_opts, shell='bash')
            # build ffmpeg
            cmd('make', '-j', CPU_COUNT)
            cmd('make', 'install')
def build_dav1d_decoder(install_dir):
    """Build the libdav1d AV1 decoder from source and install it.

    Skips the build when a source dir for the pinned version already
    exists.  On non-Windows hosts, patches the installed dav1d.pc so
    static linking pulls in -pthread/-ldl (done once, tracked via a
    `dav1d_edited` marker file).

    :param install_dir: prefix to install headers/libs into
    """
    version = DAV1D_VERSION
    if os.path.exists(f'dav1d-{version}'):
        # Fixed message typo: "david" -> "dav1d".
        print("using existing dav1d decoder dir")
        return
    if PREFER_CLONE:
        cmd('git',
            'clone',
            '--depth=1',
            '-b',
            f'{version}',
            'https://code.videolan.org/videolan/dav1d.git',
            f'dav1d-{version}',
            xenv=GIT_ENV)
    else:
        download_archive(
            f"https://code.videolan.org/videolan/dav1d/-/archive/{version}/dav1d-{version}.zip",
            ".")
    with pushd(f'dav1d-{version}'):
        # dav1d builds with meson/ninja; force a static release library.
        cmd('meson', 'build', '--prefix', os.path.join(install_dir,
                                                       ''), '--libdir',
            os.path.join(install_dir, 'lib'), '--buildtype', 'release',
            '--default-library=static', '-Denable_avx512=false')
        cmd('ninja', '-C', 'build')
        with pushd('build'):
            cmd('ninja', 'install')
    if os.name != 'nt':
        if os.path.isfile(
                os.path.join(install_dir, 'lib', 'pkgconfig',
                             'dav1d_edited')):
            print("dav1d.pc already edited")
        else:
            with pushd(install_dir, 'lib', 'pkgconfig'):
                replace('dav1d.pc', '-ldav1d', '-ldav1d -pthread -ldl')
                cmd('touch', 'dav1d_edited')
def build_svt_hevc_encoder(install_dir, build_mode):
    """Build the SVT-HEVC encoder from source and install it as a static lib.

    Skips the build when a source dir for the pinned version already exists.
    Debug builds patch the sources to silence memory-usage accounting and
    library printf output before configuring.

    :param install_dir: prefix to install headers/libs into
    :param build_mode: CMake build type ('Release' or 'Debug')
    """
    version = SVT_HEVC_VERSION
    if os.path.exists(f'SVT-HEVC-{version}'):
        print("using existing SVT-HEVC encoder dir")
        return
    if PREFER_CLONE:
        cmd('git',
            'clone',
            '--depth=1',
            '-b',
            f'v{version}',
            'https://github.com/OpenVisualCloud/SVT-HEVC.git',
            f'SVT-HEVC-{version}',
            xenv=GIT_ENV)
    else:
        download_archive(
            f"https://github.com/OpenVisualCloud/SVT-HEVC/archive/refs/tags/v{version}.zip",
            ".")
    with pushd(f'SVT-HEVC-{version}'):
        if build_mode == 'Debug':
            # Quiet the debug build: disable memory-usage tracking and
            # the library's own printf logging.
            replace(os.path.join('Source', 'Lib', 'Codec', 'EbMalloc.h'),
                    '#define DEBUG_MEMORY_USAGE', '#undef DEBUG_MEMORY_USAGE')
            replace(os.path.join('Source', 'Lib', 'Codec', 'EbDefinitions.h'),
                    '#define LIB_PRINTF_ENABLE 1',
                    '#define LIB_PRINTF_ENABLE 0')
        mkdir('release')
        with pushd('release'):
            cmd('cmake', '..', '-GUnix Makefiles',
                f'-DCMAKE_BUILD_TYPE={build_mode}',
                f'-DCMAKE_INSTALL_PREFIX={os.path.join(install_dir, "")}',
                '-DCMAKE_INSTALL_LIBDIR=lib', '-DBUILD_SHARED_LIBS=off',
                '-DBUILD_APP=off')
            cmd('make', '-j', CPU_COUNT)
            cmd('make', 'install')
def build_svt_av1_encoder(install_dir, build_mode):
    """Build the SVT-AV1 encoder from source and install it as a static lib.

    Skips the build when a source dir for the pinned version already exists.
    Debug builds patch the sources to silence memory-usage accounting.

    :param install_dir: prefix to install headers/libs into
    :param build_mode: CMake build type ('Release' or 'Debug')
    """
    version = SVT_AV1_VERSION
    if os.path.exists(f'SVT-AV1-{version}'):
        print("using existing SVT-AV1 encoder dir")
        return
    if PREFER_CLONE:
        cmd('git',
            'clone',
            '--depth=1',
            '-b',
            f'{version}',
            'https://gitlab.com/AOMediaCodec/SVT-AV1',
            f'SVT-AV1-{version}',
            xenv=GIT_ENV)
    else:
        download_archive(
            f"https://gitlab.com/AOMediaCodec/SVT-AV1/-/archive/{version}/SVT-AV1-{version}.zip",
            ".")
    with pushd(f'SVT-AV1-{version}'):
        if build_mode == 'Debug':
            replace(
                os.path.join('Source', 'Lib', 'Common', 'Codec', 'EbMalloc.h'),
                '#define DEBUG_MEMORY_USAGE', '#undef DEBUG_MEMORY_USAGE')
        mkdir('release')
        with pushd('release'):
            # NOTE(review): '$(CMAKE_C_FLAGS)' below is Make-style syntax in
            # a CMake cache entry -- confirm it expands as intended.
            cmd('cmake', '..', '-GUnix Makefiles',
                f'-DCMAKE_BUILD_TYPE={build_mode}',
                f'-DCMAKE_INSTALL_PREFIX={os.path.join(install_dir, "")}',
                '-DCMAKE_INSTALL_LIBDIR=lib', '-DBUILD_SHARED_LIBS=off',
                '-DBUILD_APPS=off',
                '-DBUILD_DEC=off' if os.name != 'nt' else '',
                '-DCMAKE_C_FLAGS=$(CMAKE_C_FLAGS) -DSVT_LOG_QUIET=1')
            cmd('make', '-j', CPU_COUNT)
            cmd('make', 'install')
def build_gpl_x264_encoder(install_dir):
    """Build the (GPL-licensed) x264 encoder from source and install it.

    Skips the build when a source dir for the pinned branch already exists.

    :param install_dir: prefix to install headers/libs into
    """
    version = X264_VERSION
    # x264's own configure script wants a POSIX-style prefix (MinGW).
    posix_install_dir = to_posix_path(install_dir)
    if os.path.exists(f'x264-{version}'):
        print("using existing x264 encoder dir")
        return
    if PREFER_CLONE:
        cmd('git',
            'clone',
            '--depth=1',
            '-b',
            f'{version}',
            'https://code.videolan.org/videolan/x264.git',
            f'x264-{version}',
            xenv=GIT_ENV)
    else:
        download_archive(
            f"https://code.videolan.org/videolan/x264/-/archive/{version}/x264-{version}.zip",
            ".")
    with pushd(f'x264-{version}'):
        cmd('./configure',
            f'--prefix={posix_install_dir}',
            '--enable-static',
            '--enable-pic',
            shell='bash')
        cmd('make', '-j', CPU_COUNT)
        cmd('make', 'install')
def ffmpeg_configure_opts(install_dir, arch, validation):
    """Build the base ./configure option list for ffmpeg.

    Starts from --disable-everything and re-enables only the components
    the CPU runtime needs, then appends per-platform/arch options.

    :param install_dir: install prefix (converted to POSIX form for MinGW)
    :param arch: 'x86_64' or 'x86_32' (the latter Windows-only)
    :param validation: include extra components used by validation builds
    :return: list of configure arguments
    :raises Exception: on an unknown `arch` for Windows builds
    """
    posix_install_dir = to_posix_path(install_dir)
    result = [
        f'--prefix={posix_install_dir}',
        '--enable-static',
        '--disable-shared',
        '--enable-pic',
        '--disable-everything',
        '--disable-network',
        '--disable-doc',
        '--disable-manpages',
        '--disable-hwaccels',
        '--disable-appkit',
        '--disable-alsa',
        '--disable-avfoundation',
        '--disable-iconv',
        '--disable-lzma',
        '--disable-securetransport',
        '--disable-xlib',
        '--disable-zlib',
        '--disable-amf',
        '--disable-audiotoolbox',
        '--disable-cuvid',
        '--disable-d3d11va',
        '--disable-dxva2',
        '--disable-nvdec',
        '--disable-nvenc',
        '--disable-v4l2-m2m',
        '--disable-videotoolbox',
        '--disable-sdl2',
        # Components re-enabled for the CPU runtime follow.
        '--enable-indev=lavfi',
        '--enable-protocol=file',
        '--enable-bsf=h264_mp4toannexb',
        '--enable-bsf=hevc_mp4toannexb',
        '--enable-bsf=mjpeg2jpeg',
        '--enable-bsf=mjpega_dump_header',
        '--enable-decoder=rawvideo',
        '--enable-encoder=rawvideo',
        '--enable-demuxer=rawvideo',
        '--enable-demuxer=mjpeg',
        '--enable-muxer=rawvideo',
        '--enable-muxer=null',
        '--enable-decoder=wrapped_avframe',
        '--enable-encoder=wrapped_avframe',
        '--enable-muxer=h264',
        '--enable-muxer=mpeg2video',
        '--enable-muxer=mjpeg',
        '--enable-muxer=hevc',
        '--enable-muxer=ivf',
        '--enable-filter=testsrc',
        '--enable-demuxer=image2',
        '--enable-muxer=image2',
        '--enable-filter=yuvtestsrc',
        '--enable-filter=rgbtestsrc',
        '--enable-decoder=h264',
        '--enable-parser=h264',
        '--enable-demuxer=h264',
        '--enable-decoder=hevc',
        '--enable-demuxer=hevc',
        '--enable-demuxer=ivf',
        '--enable-parser=hevc',
        '--enable-parser=mjpeg',
        '--enable-parser=av1',
        '--enable-decoder=mpeg2video',
        '--enable-encoder=mpeg2video',
        '--enable-encoder=mjpeg',
        '--enable-decoder=mjpeg',
        '--enable-filter=overlay',
        '--enable-filter=crop',
        '--enable-filter=scale',
        '--enable-filter=drawbox',
        '--enable-filter=psnr',
        '--enable-filter=split',
        '--enable-filter=select',
        '--enable-filter=concat',
        '--enable-filter=ssim',
    ]
    if os.name == 'nt':
        result.extend([
            '--extra-cflags=-fPIC',
            '--extra-ldflags=-fPIC',
            '--enable-filter=testsrc2',
        ])
        if arch == 'x86_64':
            result.append('--arch=x86_64')
            result.append('--target-os=mingw64')
        elif arch == 'x86_32':
            result.append('--arch=x86_32')
            result.append('--target-os=mingw32')
        else:
            raise Exception(f'Unknown architecture {arch}')
    else:
        # Non-Windows: validation builds keep avdevice/swresample and the
        # testsrc2 source; regular builds strip them for size.
        if validation:
            result.extend([
                '--enable-filter=testsrc2', '--disable-vaapi',
                '--disable-cuda-llvm'
            ])
        else:
            result.extend([
                '--disable-vaapi', '--disable-cuda-llvm', '--disable-avdevice',
                '--disable-swresample'
            ])
    return result
def ffmpeg_debug_configure_opts():
    """Return the extra ffmpeg ./configure flags for a debuggable build."""
    # Disable optimizations/inlining and keep frame pointers so that
    # debuggers see accurate stacks and variables.
    debug_flags = [
        '--disable-optimizations',
        '--extra-cflags=-Og',
        '--extra-cflags=-fno-omit-frame-pointer',
        '--enable-debug=3',
        '--extra-cflags=-fno-inline',
    ]
    return debug_flags
def ffmpeg_3rdparty_configure_opts(build_dir, use_gpl):
    """Build extra ffmpeg ./configure options for each third-party codec
    that pkg-config can find.

    Also applies the SVT-HEVC ffmpeg plugin patch (via `git am` in the
    current directory, expected to be the ffmpeg checkout) the first time,
    using a `svt-hevc-patched` marker file to avoid re-applying.

    :param build_dir: dependency build dir holding the SVT-HEVC sources
    :param use_gpl: enable the GPL x264 encoder when available
    :return: list of configure arguments
    """
    result = []
    pkg_list = capture_cmd("pkg-config", "--list-all")[0]
    if "aom" in pkg_list:
        print("aom decoder found")
        result.extend(['--enable-libaom', '--enable-decoder=libaom_av1'])
    if "dav1d" in pkg_list:
        print("dav1d decoder found")
        result.extend(['--enable-libdav1d', '--enable-decoder=libdav1d'])
    if use_gpl:
        if "x264" in pkg_list:
            print("x264 encoder found")
            result.extend([
                '--enable-gpl', '--enable-libx264', '--enable-encoder=libx264'
            ])
    if "SvtAv1Enc" in pkg_list:
        print("SVT-AV1 encoder found")
        result.extend(['--enable-libsvtav1', '--enable-encoder=libsvtav1'])
    if "SvtHevcEnc" in pkg_list:
        print("SVT-HEVC encoder found")
        result.extend(['--enable-libsvthevc', '--enable-encoder=libsvt_hevc'])
    if os.path.isfile("svt-hevc-patched"):
        print("SVT-HEVC patch already applied")
    else:
        # The SVT-HEVC wrapper is not upstream in ffmpeg n4.4; apply the
        # plugin patch shipped with the SVT-HEVC sources.
        patch = 'n4.4-0001-lavc-svt_hevc-add-libsvt-hevc-encoder-wrapper.patch'
        cmd('git',
            '-c',
            'user.name=bootstrap',
            '-c',
            'user.email=<EMAIL>@localhost',
            'am',
            os.path.join(build_dir, f'SVT-HEVC-{SVT_HEVC_VERSION}',
                         'ffmpeg_plugin', patch),
            xenv=GIT_ENV)
        cmd('touch', 'svt-hevc-patched')
    return result
# Script entry point: parse arguments and run the bootstrap build.
if __name__ == "__main__":
    main()
| 2 | 2 |
rdtools/test/filtering_test.py | kperrynrel/rdtools | 107 | 12761348 | <filename>rdtools/test/filtering_test.py
""" Filtering Module Tests. """
import pytest
import pandas as pd
import numpy as np
from rdtools import (csi_filter,
poa_filter,
tcell_filter,
clip_filter,
quantile_clip_filter,
normalized_filter,
logic_clip_filter,
xgboost_clip_filter)
import warnings
def test_csi_filter():
    ''' Unit tests for clear sky index filter.'''
    measured_poa = np.array([1, 1, 0, 1.15, 0.85])
    clearsky_poa = np.array([1, 2, 1, 1.00, 1.00])
    mask = csi_filter(measured_poa,
                      clearsky_poa,
                      threshold=0.15)
    # Points whose clearsky index falls outside 1 +/- 0.15 are rejected.
    expected = [True, False, False, True, True]
    assert mask.tolist() == expected
def test_poa_filter():
    ''' Unit tests for plane of array insolation filter.'''
    measured_poa = np.array([201, 1199, 500, 200, 1200])
    mask = poa_filter(measured_poa,
                      poa_global_low=200,
                      poa_global_high=1200)
    # Both POA cutoffs are exclusive, so the boundary values must fail.
    assert mask.tolist() == [True, True, True, False, False]
def test_tcell_filter():
    ''' Unit tests for cell temperature filter.'''
    tcell = np.array([-50, -49, 0, 109, 110])
    mask = tcell_filter(tcell,
                        temperature_cell_low=-50,
                        temperature_cell_high=110)
    # Both temperature cutoffs are exclusive, so the boundary values fail.
    assert mask.tolist() == [False, True, True, True, False]
@pytest.fixture
def generate_power_time_series_no_clipping():
    """Fixture: monotonically increasing power series with no clipping.

    Returns three variants of the same 100-point ramp: one without a
    datetime index, one with an hourly tz-aware index, and one with the
    same index made tz-naive.
    """
    power_no_datetime_index = pd.Series(np.arange(1, 101))
    power_datetime_index = pd.Series(np.arange(1, 101))
    # Add datetime index to second series
    time_range = pd.date_range('2016-12-02T11:00:00.000Z',
                               '2017-06-06T07:00:00.000Z', freq='H')
    power_datetime_index.index = pd.to_datetime(time_range[:100])
    # Create a series that is tz-naive to test on
    power_datetime_index_tz_naive = power_datetime_index.copy()
    power_datetime_index_tz_naive.index = \
        power_datetime_index_tz_naive.index.tz_localize(None)
    # Note: Power is expected to be Series object with a datetime index.
    return power_no_datetime_index, power_datetime_index, \
        power_datetime_index_tz_naive
@pytest.fixture
def generate_power_time_series_irregular_intervals():
    """Fixture: one power series sampled at mixed 1/15/5-minute intervals.

    Concatenates three back-to-back segments, each starting where the
    previous one ends (the duplicated boundary point is dropped via
    ``iloc[1:]``).
    """
    # Segment 1: 61 points at 1-minute frequency.
    power_datetime_index = pd.Series(np.arange(1, 62))
    # Add datetime index to second series
    time_range_1 = pd.date_range('2016-12-02T11:00:00.000Z',
                                 '2017-06-06T07:00:00.000Z', freq='1T')
    power_datetime_index.index = pd.to_datetime(time_range_1[:61])
    # Segment 2: 99 points at 15-minute frequency.
    power_datetime_index_2 = pd.Series(np.arange(100, 200))
    time_range_2 = pd.date_range(power_datetime_index.index.max(),
                                 '2017-06-06T07:00:00.000Z', freq='15T')
    power_datetime_index_2.index = pd.to_datetime(time_range_2[:100])
    power_datetime_index_2 = power_datetime_index_2.iloc[1:]
    power_datetime_index = pd.concat([power_datetime_index,
                                      power_datetime_index_2])
    # Segment 3: 99 decreasing points at 5-minute frequency.
    power_datetime_index_3 = pd.Series(list(reversed(np.arange(100, 200))))
    time_range_3 = pd.date_range(power_datetime_index.index.max(),
                                 '2017-06-06T07:00:00.000Z', freq='5T')
    power_datetime_index_3.index = pd.to_datetime(time_range_3[:100])
    power_datetime_index_3 = power_datetime_index_3.iloc[1:]
    power_datetime_index = pd.concat([power_datetime_index,
                                      power_datetime_index_3])
    # NOTE(review): sort_index() returns a new object; its result is
    # discarded here, so the series is NOT actually sorted -- confirm
    # whether reassignment was intended.
    power_datetime_index.sort_index()
    # Note: Power is expected to be Series object with a datetime index.
    return power_datetime_index
@pytest.fixture
def generate_power_time_series_one_min_intervals():
    """Build a 100-point triangular power ramp (up, then back down)
    sampled at one-minute intervals with a tz-aware datetime index.

    Note: power is expected to be a Series object with a datetime
    index by the filters under test.
    """
    ramp_up = pd.Series(np.arange(1, 51))
    # Mirror the ramp to create the descending half.
    power_datetime_index = pd.concat([ramp_up, ramp_up[::-1]])
    minutes = pd.date_range('2016-12-02T11:00:00.000Z',
                            '2017-06-06T07:00:00.000Z', freq='1T')
    power_datetime_index.index = pd.to_datetime(minutes[:100])
    return power_datetime_index
@pytest.fixture
def generate_power_time_series_clipping():
    """Build a triangular power ramp whose four central readings are
    flattened to 110 to emulate inverter clipping.

    Returns the series both without a datetime index and with a
    tz-aware hourly datetime index.
    """
    ramp = pd.Series(np.arange(2, 101, 2))
    power_no_datetime_index = pd.concat([ramp, ramp[::-1]])
    # Flatten the readings around the peak to a constant 110.
    power_no_datetime_index[48:52] = 110
    power_no_datetime_index = power_no_datetime_index.reset_index(drop=True)
    # Attach a tz-aware hourly index to a copy of the same data.
    power_datetime_index = power_no_datetime_index.copy()
    hours = pd.date_range('2016-12-02T11:00:00.000Z',
                          '2017-06-06T07:00:00.000Z', freq='H')
    power_datetime_index.index = pd.to_datetime(hours[:100])
    # Note: power is expected to be a Series object with a datetime
    # index by the filters under test.
    return power_no_datetime_index, power_datetime_index
def test_quantile_clip_filter():
    """Check the quantile-based inverter clipping filter."""
    # Power must be a Series object because the filter relies on the
    # Series.quantile() method.
    power = pd.Series(np.arange(1, 101))
    mask = quantile_clip_filter(power, quantile=0.98)
    # Readings below 99% of the 98th percentile should be kept.
    expected = power < (98 * 0.99)
    assert (expected == mask).all()
def test_logic_clip_filter(generate_power_time_series_no_clipping,
                           generate_power_time_series_clipping,
                           generate_power_time_series_one_min_intervals,
                           generate_power_time_series_irregular_intervals):
    ''' Unit tests for logic clipping filter.

    Exercises input validation (index type, mounting configuration,
    minimum length, sorted index), the tz-naive and irregular-sampling
    warnings, and the filter's output on clipped and non-clipped data.
    '''
    power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
        generate_power_time_series_no_clipping
    # Test that a Type Error is raised when a pandas series
    # without a datetime index is used.
    pytest.raises(TypeError, logic_clip_filter,
                  power_no_datetime_index_nc)
    # Test that an error is thrown when we don't include the correct
    # mounting configuration input
    pytest.raises(ValueError, logic_clip_filter,
                  power_datetime_index_nc, 'not_fixed')
    # Test that an error is thrown when there are 10 or fewer readings
    # in the time series
    pytest.raises(Exception, logic_clip_filter,
                  power_datetime_index_nc[:9])
    # Test that a warning is thrown when the time series is tz-naive
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
        logic_clip_filter(power_nc_tz_naive)
        # Warning thrown for it being an experimental filter + tz-naive
        assert len(w) == 2
    # Scramble the index and run through the filter. This should throw
    # an IndexError.
    power_datetime_index_nc_shuffled = power_datetime_index_nc.sample(frac=1)
    pytest.raises(IndexError, logic_clip_filter,
                  power_datetime_index_nc_shuffled, 'fixed')
    # Generate 1-minute interval data, run it through the function, and
    # check that the associated data returned is 1-minute
    power_datetime_index_one_min_intervals = \
        generate_power_time_series_one_min_intervals
    mask_one_min = logic_clip_filter(power_datetime_index_one_min_intervals)
    # Generate irregular interval data, and run it through the XGBoost model
    power_datetime_index_irregular = \
        generate_power_time_series_irregular_intervals
    # Make sure that the routine throws a warning when the data sampling
    # frequency is less than 95% consistent
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
        logic_clip_filter(power_datetime_index_irregular)
        # Warning thrown for it being an experimental filter + irregular
        # sampling frequency.
        assert len(w) == 2
    # Check that the returned time series index for the logic filter is
    # the same as the passed time series index
    mask_irregular = logic_clip_filter(power_datetime_index_irregular)
    # Expect none of the sequence to be clipped (as it's
    # constantly increasing)
    mask_nc = logic_clip_filter(power_datetime_index_nc)
    # Test the time series where the data is clipped
    power_no_datetime_index_c, power_datetime_index_c = \
        generate_power_time_series_clipping
    # Expect 4 values in middle of sequence to be clipped (when x=50)
    mask_c = logic_clip_filter(power_datetime_index_c)
    filtered_c = power_datetime_index_c[mask_c]
    # All assertions are grouped at the end of the test.
    assert bool(mask_nc.all(axis=None))
    # 100 readings minus the 4 clipped ones around the peak.
    assert (len(filtered_c) == 96)
    assert bool((mask_one_min.index.to_series().diff()[1:] ==
                 np.timedelta64(60, 's')).all(axis=None))
    assert bool((mask_irregular.index == power_datetime_index_irregular.index)
                .all(axis=None))
def test_xgboost_clip_filter(generate_power_time_series_no_clipping,
                             generate_power_time_series_clipping,
                             generate_power_time_series_one_min_intervals,
                             generate_power_time_series_irregular_intervals):
    ''' Unit tests for XGBoost clipping filter.

    Mirrors test_logic_clip_filter: input validation, tz-naive warning,
    and output checks on clipped, non-clipped, 1-minute and
    irregularly sampled series.
    '''
    # Test the time series where the data isn't clipped
    power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
        generate_power_time_series_no_clipping
    # Test that a Type Error is raised when a pandas series
    # without a datetime index is used.
    pytest.raises(TypeError, xgboost_clip_filter,
                  power_no_datetime_index_nc)
    # Test that an error is thrown when we don't include the correct
    # mounting configuration input
    pytest.raises(ValueError, xgboost_clip_filter,
                  power_datetime_index_nc, 'not_fixed')
    # Test that an error is thrown when there are 10 or fewer readings
    # in the time series
    pytest.raises(Exception, xgboost_clip_filter,
                  power_datetime_index_nc[:9])
    # Test that a warning is thrown when the time series is tz-naive
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
        xgboost_clip_filter(power_nc_tz_naive)
        # Warning thrown for it being an experimental filter + tz-naive
        assert len(w) == 2
    # Scramble the index and run through the filter. This should throw
    # an IndexError.
    power_datetime_index_nc_shuffled = power_datetime_index_nc.sample(frac=1)
    pytest.raises(IndexError, xgboost_clip_filter,
                  power_datetime_index_nc_shuffled, 'fixed')
    # Generate 1-minute interval data, run it through the function, and
    # check that the associated data returned is 1-minute
    power_datetime_index_one_min_intervals = \
        generate_power_time_series_one_min_intervals
    mask_one_min = xgboost_clip_filter(power_datetime_index_one_min_intervals)
    # Generate irregular interval data, and run it through the XGBoost model
    power_datetime_index_irregular = \
        generate_power_time_series_irregular_intervals
    # Check that the returned time series index for XGBoost is the same
    # as the passed time series index
    mask_irregular = xgboost_clip_filter(power_datetime_index_irregular)
    # Expect none of the sequence to be clipped (as it's
    # constantly increasing)
    mask_nc = xgboost_clip_filter(power_datetime_index_nc)
    # Test the time series where the data is clipped
    power_no_datetime_index_c, power_datetime_index_c = \
        generate_power_time_series_clipping
    # Expect 4 values in middle of sequence to be clipped (when x=50)
    mask_c = xgboost_clip_filter(power_datetime_index_c)
    filtered_c = power_datetime_index_c[mask_c]
    # All assertions are grouped at the end of the test.
    assert bool(mask_nc.all(axis=None))
    # 100 readings minus the 4 clipped ones around the peak.
    assert (len(filtered_c) == 96)
    assert bool((mask_one_min.index.to_series().diff()[1:] ==
                 np.timedelta64(60, 's')).all(axis=None))
    assert bool((mask_irregular.index == power_datetime_index_irregular.index)
                .all(axis=None))
def test_clip_filter(generate_power_time_series_no_clipping):
    ''' Unit tests for inverter clipping filter.

    Checks that the clip_filter wrapper defaults to the quantile
    filter, validates its model argument, and dispatches keyword
    arguments to the xgboost and logic filters.
    '''
    # Create a time series to test
    power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
        generate_power_time_series_no_clipping
    # Check that the master wrapper defaults to the
    # quantile_clip_filter_function.
    # Note: Power is expected to be Series object because clip_filter makes
    # use of the Series.quantile() method.
    filtered_quantile = clip_filter(power_no_datetime_index_nc, quantile=0.98)
    # Expect 99% of the 98th quantile to be filtered
    expected_result_quantile = power_no_datetime_index_nc < (98 * 0.99)
    # Check that the clip filter defaults to quantile clip filter when
    # deprecated params are passed
    warnings.simplefilter("always")
    with warnings.catch_warnings(record=True) as w:
        clip_filter(power_datetime_index_nc, 0.98)
        assert len(w) == 1
    # Check that a ValueError is thrown when a model is passed that
    # is not in the acceptable list.
    pytest.raises(ValueError, clip_filter,
                  power_datetime_index_nc,
                  'random_forest')
    # Check that the wrapper handles the xgboost clipping
    # function with kwargs.
    filtered_xgboost = clip_filter(power_datetime_index_nc,
                                   'xgboost',
                                   mounting_type="fixed")
    # Check that the wrapper handles the logic clipping
    # function with kwargs.
    filtered_logic = clip_filter(power_datetime_index_nc,
                                 'logic',
                                 mounting_type="fixed",
                                 rolling_range_max_cutoff=0.3)
    # Check that the function raises a TypeError if a wrong keyword
    # arg is passed in the kwarg arguments.
    pytest.raises(TypeError, clip_filter, power_datetime_index_nc,
                  'xgboost',
                  rolling_range_max_cutoff=0.3)
    # All assertions are grouped at the end of the test.
    assert bool((expected_result_quantile == filtered_quantile)
                .all(axis=None))
    assert bool(filtered_xgboost.all(axis=None))
    assert bool(filtered_logic.all(axis=None))
def test_normalized_filter_default():
    """Check normalized_filter with default and explicit bounds."""
    # Default bounds: the low cutoff rejects negative values.
    result = normalized_filter(pd.Series([-5, 5]))
    pd.testing.assert_series_equal(result, pd.Series([False, True]))
    # Disabling both cutoffs keeps everything, however extreme.
    result = normalized_filter(pd.Series([-1e6, 1e6]),
                               energy_normalized_low=None,
                               energy_normalized_high=None)
    pd.testing.assert_series_equal(result, pd.Series([True, True]))
    # Explicit symmetric bounds reject values outside (-1, 1).
    result = normalized_filter(pd.Series([-2, 2]),
                               energy_normalized_low=-1,
                               energy_normalized_high=1)
    pd.testing.assert_series_equal(result, pd.Series([False, False]))
    # Values straddling the default low cutoff of 0.01: just below is
    # rejected, just above (and arbitrarily large) is kept.
    eps = 1e-16
    result = normalized_filter(pd.Series([0.01 - eps, 0.01 + eps, 1e308]))
    pd.testing.assert_series_equal(result, pd.Series([False, True, True]))
| 2.421875 | 2 |
inpaint_melanoma/models/__init__.py | octaviomtz/inpaint_melanoma | 0 | 12761349 | from .common import *
from .downsampler import *
from .skip import * | 1 | 1 |
klever/core/pfg/fragmentation/busybox.py | kirillyat/klever | 16 | 12761350 | #
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from klever.core.utils import make_relative_path
from klever.core.pfg.fragmentation import FragmentationAlgorythm
class Busybox(FragmentationAlgorythm):
    """Program fragmentation strategy for BusyBox: creates one fragment
    per applet (a source file exporting a ``*_main`` entry point plus
    its dependencies) and a separate 'libbb' fragment for the shared
    library directory."""

    # Clade preset name describing the BusyBox build base.
    CLADE_PRESET = 'busybox_linux'

    def __init__(self, logger, conf, tactic, pf_dir):
        """Initialize the algorithm and read the tactic option that
        controls whether libbb dependencies are merged into applet
        fragments."""
        super().__init__(logger, conf, tactic, pf_dir)
        self._incorporate_libbb = tactic.get("include dependencies from libbb to applets fragments")
        # Maps file name -> number of *additional* applet fragments
        # containing the file (0 means it occurs in exactly one).
        self._match_files = dict()

    def _determine_units(self, program):
        """
        Find all files that has \w+_main function and add dependencies files except that ones that stored in libbb dir.
        All files from the libbb directory add to the specific unit with the libbb name.

        :param program: Program object.
        """
        # NOTE(review): the pattern is "\w+main" (no underscore), not
        # "\w+_main" as the docstring says. Since "_" is a word
        # character it still matches names like "ls_main", but it would
        # also match e.g. "remains" -- confirm the intended pattern.
        main_func = re.compile("\\w+main")

        libbb = set()
        applets = dict()
        for file in program.files:
            rel_path = make_relative_path(self.source_paths, str(file))
            # Files under libbb/ are collected into their own fragment.
            if os.path.commonpath(['libbb', rel_path]):
                libbb.add(file)
            else:
                for func in file.export_functions:
                    if main_func.match(func):
                        # The applet name is the file's base name
                        # without its extension.
                        path, name = os.path.split(rel_path)
                        name = os.path.splitext(name)[0]
                        applets[name] = {file}
                        if self._incorporate_libbb:
                            dfiles = program.collect_dependencies({file})
                        else:
                            # Exclude dependencies that live in libbb/.
                            dfiles = program.collect_dependencies(
                                {file}, filter_func=lambda x:
                                not os.path.commonpath(['libbb', make_relative_path(self.source_paths, x.name)]))
                        applets[name].update(dfiles)

        # Create fragments for found applets and libbb
        for name, files in applets.items():
            program.create_fragment(name, files, add=True)
            for file in files:
                # The first fragment containing a file initializes its
                # counter to 0; each further fragment increments it, so
                # a positive count marks a file shared between applets.
                if file.name not in self._match_files:
                    self._match_files[file.name] = 0
                else:
                    self._match_files[file.name] += 1
        program.create_fragment('libbb', libbb, add=True)

        self.logger.info('Found {} applets: {}'.format(len(applets), ', '.join(applets)))

    def _determine_targets(self, program):
        """
        Determine that program fragments that should be verified. We refer to these fragments as target fragments.

        :param program: Program object.
        :return: None (fragments are flagged in place).
        """
        super()._determine_targets(program)

        # Do not consider libbb files as targets
        # NOTE(review): the condition below actually deselects every
        # file whose match count is positive, i.e. any file shared by
        # more than one applet fragment -- not only libbb files.
        # Confirm this matches the intent of the comment above.
        for file in (program._files[f] for f in self._match_files if self._match_files[f] > 0):
            file.target = False
| 2.0625 | 2 |