max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
system-tests/test_same_id_multiple_modules.py | geishm-ansto/kafka-to-nexus | 0 | 12758451 | from helpers.kafkahelpers import (
create_producer,
publish_run_start_message,
publish_f142_message,
)
from helpers.nexushelpers import OpenNexusFileWhenAvailable
from helpers.timehelpers import unix_time_milliseconds
from time import sleep
from datetime import datetime
import pytest
def check(condition, fail_string):
    """Fail the surrounding pytest test with *fail_string* unless *condition* is truthy."""
    if condition:
        return
    pytest.fail(fail_string)
def test_two_different_writer_modules_with_same_flatbuffer_id(docker_compose):
    """System test: two writer modules registered for the same flatbuffer id
    (f142 and f142_test) must each write their own dataset.

    Publishes f142 samples from two sources, starts a file-writing job over a
    window covering those samples, then inspects the resulting NeXus file.
    Requires the docker-compose Kafka/file-writer stack (``docker_compose``
    fixture).
    """
    producer = create_producer()
    # Timestamps are placed 10 s in the past so the write window below
    # (start_time .. start_time+5s) fully covers the published samples.
    start_time = unix_time_milliseconds(datetime.utcnow()) - 10000
    # One sample per second, from two distinct sources, over 10 seconds.
    for i in range(10):
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_1",
        )
        publish_f142_message(
            producer,
            "TEST_sampleEnv",
            int(start_time + i * 1000),
            source_name="test_source_2",
        )
    check(producer.flush(5) == 0, "Unable to flush kafka messages.")
    # Start file writing
    publish_run_start_message(
        producer,
        "commands/nexus_structure_multiple_modules.json",
        "output_file_multiple_modules.nxs",
        start_time=int(start_time),
        stop_time=int(start_time + 5 * 1000),
    )
    # Give it some time to accumulate data
    sleep(10)
    filepath = "output-files/output_file_multiple_modules.nxs"
    # OpenNexusFileWhenAvailable blocks until the writer has produced the file.
    with OpenNexusFileWhenAvailable(filepath) as file:
        # dataset1 is handled by the regular f142 module: value + time.
        assert (
            len(file["entry/sample/dataset1/time"][:]) > 0
            and len(file["entry/sample/dataset1/value"][:]) > 0
        ), "f142 module should have written this dataset, it should have written a value and time"
        # dataset2 is handled by f142_test, which writes cue_index only.
        assert (
            "cue_timestamp_zero" not in file["entry/sample/dataset2"]
        ), "f142_test module should have written this dataset, it writes cue_index but no cue_timestamp_zero"
        assert (
            len(file["entry/sample/dataset2/cue_index"][:]) > 0
        ), "Expected index values, found none."
        for i in range(len(file["entry/sample/dataset2/cue_index"][:])):
            assert (
                file["entry/sample/dataset2/cue_index"][i] == i
            ), "Expect consecutive integers to be written by f142_test"
| 2.234375 | 2 |
func_X.py | xieshentoken/dTheta | 2 | 12758452 | <filename>func_X.py
import re
from itertools import permutations
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class Xyy():
    def __init__(self, path, el=None, ael = None, d1=None, d2=None, d3=None, phi12=None, phi23=None, order_n=None):
        """Hold one JCPDS/PDF card plus measured values to match against it.

        Args:
            path: path of the PDF-card text file to parse in ``getPdfInfo``.
            el: tolerance on d-spacing matches (used as ``abs(x - d) <= el``).
            ael: tolerance on angle matches, in degrees.
            d1, d2, d3: measured interplanar spacings to identify.
            phi12: measured angle between planes d1 and d2 (degrees).
            phi23: measured angle between planes d2 and d3 (degrees).
            order_n: divisor used in ``fit`` to filter indices by divisibility
                (presumably the diffraction order -- TODO confirm).
        """
        self.path = path
        self.el = el
        self.ael = ael
        self.d1 = d1
        self.d2 = d2
        self.d3 = d3
        self.phi12 = phi12
        self.phi23 = phi23
        self.order_n = order_n
        self.text = []          # raw lines of the PDF card file
        self.d_A_Index = 0      # line index of the 'd(A)' table header
        self.title = ''         # card number + chemical formula
        self.cryForm = ''       # crystal system name, e.g. 'Cubic'
        self.cellPara = np.zeros(6) # unit-cell parameters; reshaped later to np.array([[a,b,c],[alpha,beta,gamma]])
        self.data = pd.DataFrame(columns=['d(A)','h','k','l']) # DataFrame holding Miller indices, d-spacings etc.
        self.dA_hkl = pd.DataFrame(columns=['d(A)','h','k','l']) # `data` with outliers removed
def getPdfInfo(self):
i = 0
d_A_Regex = re.compile(r'd\(.\)')
with open(self.path) as jcpds:# 根据路径读取PDF卡片,保存在text(list)中
for line in jcpds:
match = d_A_Regex.search(line)
if match:
self.d_A_Index = i
self.text.append(line)
i += 1
self.title = self.text[0].split()[0] + ' ' + self.text[2]# 记录PDF卡片号码及物质化学式
# 读取相应晶体类型cryForm及晶胞参数
cryFormRegex = re.compile(r'Cubic|Tetragonal|Orthorhombic|Monoclinic|Triclinic|Hexagonal|Trigonal|Rhombohedral')
cellParaRegex = re.compile(r'((\d)+\.(\d)+)')
paraIndex = 0
for i in self.text:
if cryFormRegex.search(i):
paraIndex = self.text.index(i) + 1
cryFormSearch = cryFormRegex.search(i)
self.cryForm = cryFormSearch.group()
break
cellParaSearch = cellParaRegex.findall(self.text[paraIndex].split('Pearson')[0])
# cellParaSearch = cellParaRegex.findall(self.text[paraIndex])
cellPara0 = [ cellParaSearch[i][0] for i in range(0, len(cellParaSearch)) ]
if self.cryForm == 'Cubic':
a = b = c = float(cellPara0[0])
alpha = beta = gamma = 90.0
elif self.cryForm == 'Tetragonal':
if (len(cellPara0) == 6)or(len(cellPara0) == 4)or((len(cellPara0) == 3)and(cellPara0[0] == cellPara0[1])):
a = float(cellPara0[0])
b = float(cellPara0[1])
c = float(cellPara0[2])
elif (len(cellPara0) == 5)or((len(cellPara0) == 3)and(cellPara0[2] == 90))or(len(cellPara0) == 2):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
else:
raise Exception('晶格常数识别错误')
if a != b:
raise Exception('晶格常数识别错误')
alpha = beta = gamma = 90.0
elif self.cryForm == 'Orthorhombic':
a = float(cellPara0[0])
b = float(cellPara0[1])
c = float(cellPara0[2])
alpha = beta = gamma = 90.0
elif self.cryForm == 'Monoclinic':
a = float(cellPara0[0])
b = float(cellPara0[1])
c = float(cellPara0[2])
alpha = 90.0
if (len(cellPara0) == 4)or(len(cellPara0) == 5):
beta = float(cellPara0[3])
elif len(cellPara0) == 6:
beta = float(cellPara0[4])
if beta == 90.0:
raise Exception('晶格常数识别错误')
gamma = 90.0
elif self.cryForm == 'Triclinic':
if len(cellPara0) == 6:
a = float(cellPara0[0])
b = float(cellPara0[1])
c = float(cellPara0[2])
alpha = float(cellPara0[3])
beta = float(cellPara0[4])
gamma = float(cellPara0[5])
else:
raise Exception('晶格常数识别错误')
elif self.cryForm == 'Hexagonal':
if len(cellPara0) == 6:
a = float(cellPara0[0])
b = float(cellPara0[1])
c = float(cellPara0[2])
elif (len(cellPara0) == 5)and(float(cellPara0[2]) == 90.0):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
elif (len(cellPara0) == 5)and(float(cellPara0[2]) != 90.0):
a = b = float(cellPara0[0])
c = float(cellPara0[2])
elif (len(cellPara0) == 4)and(float(cellPara0[2]) == 90.0):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
elif (len(cellPara0) == 4)and(float(cellPara0[2]) != 90.0):
a = b = float(cellPara0[0])
c = float(cellPara0[2])
elif (len(cellPara0) == 3)and(float(cellPara0[2]) == 120.0):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
elif (len(cellPara0) == 3)and(float(cellPara0[2]) != 120.0):
a = b = float(cellPara0[0])
c = float(cellPara0[2])
elif (len(cellPara0) == 2)and(float(cellPara0[0]) != float(cellPara0[1])):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
else:
raise Exception('晶格常数识别错误')
alpha = 90.0
beta = 90.0
gamma = 120.0
elif (self.cryForm == 'Trigonal') or (self.cryForm =='Rhombohedral'):
if (len(cellPara0) == 2)and(cellPara0[0] != cellPara0[1]):
a = b = float(cellPara0[0])
c = float(cellPara0[1])
else:
raise Exception('晶格常数识别错误')
alpha = 90.0
beta = 90.0
gamma = 120.0
else:
print('Invalid PDF Card: {}'.format(self.title))
self.cellPara = np.array([a, b ,c, alpha, beta, gamma]).reshape(2,3)
# 获取晶面指数及晶面距、衍射强度等信息,以pandas.DataFrame形式保存
columns = self.text[self.d_A_Index].split()
if len(self.text[self.d_A_Index].split()) - len(self.text[self.d_A_Index+1].split()) >= 1:
columns.remove('n^2')
for i in columns:
if i == 'l)':
columns[columns.index(i)] = i.split(')')[0]
if '(' in columns:
columns.remove('(')
if 'd(?)' in columns:
columns.insert(columns.index('d(?)'), 'd(A)')
columns.remove('d(?)')
rest = self.text[self.d_A_Index+1:]
participle = []
for i in rest:
row = i.split()
if '(' in row:
row.remove('(')
participle.append(row)
else:
participle.append(row)
for i in participle:
for j in i:
if j != j.split(')')[0]:
i[i.index(j)] = j.split(')')[0]
else:
if '(-' in j:
i[i.index(j)] = j.split('(')[1]
preData = pd.DataFrame(participle, columns = columns)
preData = preData.dropna() # 去除空值
self.data = preData.astype('float')
dA = self.data['d(A)']
h = self.data[['h']]
k = self.data[['k']]
l = self.data[['l']]
self.dA_hkl = pd.concat([h,k,l], axis = 1)
self.dA_hkl.index = dA
def fit(self):
pod1 = self.dA_hkl.loc[[x for x in self.data['d(A)'] if abs(x-self.d1)<=self.el]]
pod2 = self.dA_hkl.loc[[x for x in self.data['d(A)'] if abs(x-self.d2)<=self.el]]
pod3 = self.dA_hkl.loc[[x for x in self.data['d(A)'] if abs(x-self.d3)<=self.el]]
# dA = self.data['d(A)']
if (pod1.values.tolist()==[])or(pod2.values.tolist()==[])or(pod3.values.tolist()==[]):
print('No solution in the card--{}'.format(self.title))
raise ValueError
else:
pass
# 生成三个包含同一组晶面的dataframe的list
expod1, expod2, expod3 = [], [], []
# 立方晶系48,六方晶系24,四方晶系16,三方晶系12,正交晶系8,单斜晶系4,三斜晶系2
# 立方晶系指数位置符号均可独立改变,共48种可能变换
if self.cryForm == 'Cubic':
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod1.append(pd.concat(y, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod2.append(pd.concat(y, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod3.append(pd.concat(y, axis=1))
# 六方晶系i=-(h+k),可以从四指数中h、k、i任取两个作为三指数的h、k,三指数中h、k位置可互换,符号需一起变,l可任意改变符号,故共24种可能变换
elif self.cryForm == 'Hexagonal':
_h = pod1[['h']]
_k = pod1[['k']]
# _i = -_h-_k
_i = -pod1['h']-pod1['k']
l = pod1[['l']]
for i in permutations([_h, _k, _i]):
h, k = i[0], i[1]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1, -1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod1.append(pd.concat(hk, axis=1))
_h = pod2[['h']]
_k = pod2[['k']]
# _i = -_h-_k
_i = -pod2['h']-pod2['k']
l = pod2[['l']]
for i in permutations([_h, _k, _i]):
h, k = i[0], i[1]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1, -1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod2.append(pd.concat(hk, axis=1))
_h = pod3[['h']]
_k = pod3[['k']]
# _i = -_h-_k
_i = -pod3['h']-pod3['k']
l = pod3[['l']]
for i in permutations([_h, _k, _i]):
h, k = i[0], i[1]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1, -1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod3.append(pd.concat(hk, axis=1))
# 四方晶系h、k指数位置可互换,符号可以任意改变,共16种可能变换
elif self.cryForm == 'Tetragonal':
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod1.append(pd.concat(hk, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod2.append(pd.concat(hk, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
ss = [p for p in permutations(x[:2])]
for y in ss:
hk = list(y)
hk.extend([x[-1]])
expod3.append(pd.concat(hk, axis=1))
# 正交晶系指数符号可以独立变化,位置不能变, 共8种可能变换
elif self.cryForm == 'Orthorhombic':
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
expod1.append(pd.concat(x, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
expod2.append(pd.concat(x, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, q*k, m*l] for p in [1, -1] for q in [1,-1] for m in [1,-1]]
for x in sel:
expod3.append(pd.concat(x, axis=1))
# 三方(菱形)晶系各指数位置可变,符号必须一起变,共12 种可能变换
elif (self.cryForm == 'Trigonal')or(self.cryForm == 'Rhombohedral'):
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod1.append(pd.concat(y, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod2.append(pd.concat(y, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
ss = [p for p in permutations(x)]
for y in ss:
expod3.append(pd.concat(y, axis=1))
# 单斜晶系指数的位置不能变,k的符号可以单独改变,共4种可能变换
elif self.cryForm == 'Monoclinic':
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1,-1]]
for x in sel:
expod1.append(pd.concat(x, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1,-1]]
for x in sel:
expod2.append(pd.concat(x, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, p*k, m*l] for p in [1, -1] for m in [1,-1]]
for x in sel:
expod3.append(pd.concat(x, axis=1))
# 三斜晶系指数的位置不能变,符号一起变,共2种可能变换
elif self.cryForm == 'Triclinic':
h = pod1[['h']]
k = pod1[['k']]
l = pod1[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
expod1.append(pd.concat(x, axis=1))
h = pod2[['h']]
k = pod2[['k']]
l = pod2[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
expod2.append(pd.concat(x, axis=1))
h = pod3[['h']]
k = pod3[['k']]
l = pod3[['l']]
sel = [[p*h, p*k, p*l] for p in [1, -1]]
for x in sel:
expod3.append(pd.concat(x, axis=1))
# 筛选出满足矢量加法条件的晶面
lis_extpod1, lis_extpod2, lis_extpod3 = [], [], []
lis_expod1, lis_expod2, lis_expod3 = [], [], []
for p in expod1:
lis_extpod1.extend(p.values.tolist())
for q in expod2:
lis_extpod2.extend(q.values.tolist())
for m in expod3:
lis_extpod3.extend(m.values.tolist())
for p in lis_extpod1:
if p not in lis_expod1:
lis_expod1.append(p)
for q in lis_extpod2:
if q not in lis_expod2:
lis_expod2.append(q)
for m in lis_extpod3:
if m not in lis_expod3:
lis_expod3.append(m)
# print('lis expod1=',lis_expod1,'\n', 'lis_expod2=',lis_expod2,'\n','lis_expod3=',lis_expod3)
rs=[]
psb_rslt = pd.DataFrame()
for q in lis_expod2:
for p in lis_expod1:
for m in lis_expod3:
# 筛选出满足矢量加法条件的晶面
if ((np.array(p)%self.order_n == np.array([0,0,0])).all())and((np.array(q)%self.order_n == np.array([0,0,0])).all())and((np.array(m)%self.order_n == np.array([0,0,0])).all()):
if ((np.array(p) + np.array(m)) == np.array(q)).all():
h11 = self.hihj(np.array(p), np.array(p))
h22 = self.hihj(np.array(q), np.array(q))
h33 = self.hihj(np.array(m), np.array(m))
h12 = self.hihj(np.array(p), np.array(q))
h23 = self.hihj(np.array(q), np.array(m))
cal_d1 = self.cal_d(np.array(p))
cal_d2 = self.cal_d(np.array(q))
cal_d3 = self.cal_d(np.array(m))
cal_phi12 = np.arccos(h12/(h11*h22)**0.5)*180./np.pi
cal_phi23 = np.arccos(h23/(h22*h33)**0.5)*180./np.pi
if (abs(cal_phi12 - self.phi12) <= self.ael)and(abs(cal_phi23 - self.phi23) <= self.ael):
error_phi12 = abs(self.phi12-cal_phi12)#/self.phi12
error_phi23 = abs(self.phi23-cal_phi23)#/self.phi23
error_d1 = abs(self.d1-cal_d1)/self.d1
error_d2 = abs(self.d2-cal_d2)/self.d2
error_d3 = abs(self.d3-cal_d3)/self.d3
p_std = [int(ip) for ip in p]
q_std = [int(iq) for iq in q]
m_std = [int(im) for im in m]
rs.append([p_std,q_std,m_std,cal_phi12,cal_phi23,cal_d1,cal_d2,cal_d3,error_phi12,error_phi23,error_d1,error_d2,error_d3])
if rs == []:
print('No solution in Card-*-: {}'.format(self.title))
else:
psb_rslt = pd.DataFrame(rs, columns = ['posiible d1', 'posiible d2', 'posiible d3', 'cal_phi<d1,d2>', 'cal_phi<d2,d3>', 'cal_d1', 'cal_d2', 'cal_d3', 'error of phi<d1,d2>', 'error of phi<d2,d3>', 'error of d1', 'error of d2', 'error of d3'])
psb_rslt[['cal_phi<d1,d2>', 'cal_phi<d2,d3>', 'cal_d1', 'cal_d2', 'cal_d3']]=psb_rslt[['cal_phi<d1,d2>', 'cal_phi<d2,d3>', 'cal_d1', 'cal_d2', 'cal_d3']].round(decimals=2)
psb_rslt[['error of phi<d1,d2>', 'error of phi<d2,d3>']]=psb_rslt[['error of phi<d1,d2>', 'error of phi<d2,d3>']].applymap(lambda x: str(format(x,'.3'))+'°')
psb_rslt[['error of d1', 'error of d2', 'error of d3']]=psb_rslt[['error of d1', 'error of d2', 'error of d3']].applymap(lambda x:format(x,'.2%'))
return psb_rslt
# H函数
# p1,p2为ndarray([h0,k0,l0])
def hihj(self, p1, p2):
abc = self.cellPara[0]
abg = self.cellPara[1]*np.pi/180
return (p1*p2).dot((np.sin(abg)**2)/(abc**2)) + (p1[1]*p2[2]+p1[2]*p2[1])*(np.cos(abg[1])*np.cos(abg[2])-np.cos(abg[0]))/(abc[1]*abc[2]) + (p1[2]*p2[0]+p1[0]*p2[2])*(np.cos(abg[2])*np.cos(abg[0])-np.cos(abg[1]))/(abc[2]*abc[0]) + (p1[0]*p2[1]+p1[1]*p2[0])*(np.cos(abg[0])*np.cos(abg[1])-np.cos(abg[2]))/(abc[0]*abc[1])
# 计算晶面距的函数
def cal_d(self, p):
ang = self.cellPara[1]*np.pi/180
vol = (1 - np.cos(ang[0])**2 - np.cos(ang[1])**2 - np.cos(ang[2])**2 + 2*np.cos(ang[0])*np.cos(ang[1])*np.cos(ang[2]))**0.5
cal_distance = vol/(self.hihj(p, p))**0.5
return cal_distance
| 2.421875 | 2 |
app.py | shivamtawari/XRayd-Ion-athon | 0 | 12758453 | <filename>app.py
import os
import cv2
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
from models import TB, Cancer, Covid, Multiple
UPLOAD_FOLDER = os.path.join('static', 'inference')
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
tb = TB()
cancer = Cancer()
covid = Covid()
# multiple = Multiple()
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the upload landing page (same template for GET and POST)."""
    return render_template('index.html')
@app.route('/uploader', methods=['GET', 'POST'])
def uploader():
    """Receive an uploaded X-ray image, save it, and run inference on it.

    Returns the rendered result page on POST; implicitly returns None
    (HTTP 500 in Flask) on GET, matching the original behaviour.
    """
    if request.method == 'POST':
        f = request.files['file']
        # Security fix: sanitise the client-controlled filename before joining
        # it into a filesystem path; the raw value may contain separators or
        # '..' and escape UPLOAD_FOLDER.  secure_filename was already imported
        # at the top of this module but never used.
        filename = secure_filename(f.filename)
        img_path = os.path.join(os.getcwd(), app.config['UPLOAD_FOLDER'], filename)
        f.save(img_path)
        return result(img_path)
@app.route('/result')
def result(img_path):
    """Run all loaded models on the image at *img_path* and render the results.

    Side effects: writes a resized copy of the input to
    static/inference/orig_resized.png and each model's ``explain()`` is
    presumably writing its heatmap under static/explain/ (TODO confirm in
    models.py).  Note: registered as a route but called directly by
    ``uploader`` with an argument, so it is not usable via GET /result.
    """
    # Save a 600x600 copy of the original for display next to the heatmaps.
    img = img_to_array(load_img(img_path, target_size=(600, 600)))
    plt.imshow(np.uint8(img))
    path_to_orig = os.path.join('static', 'inference', 'orig_resized.png')
    plt.savefig(path_to_orig, transparent=True)
    # Tuberculosis model.
    pred_tb = tb.predict(img_path)
    tb.explain()
    # Cancer model: map class probabilities to labels, drop the 'normal' entry.
    pred_cancer = cancer.predict(img_path)
    pred_cancr = dict(zip(['Adenocarcinoma', 'Large Cell Carcinoma', 'normal', 'Squamous Cell Carcinoma'], pred_cancer))
    del pred_cancr['normal']
    cancer.explain()
    # Covid model: same labelling scheme.
    pred_covid = covid.predict(img_path)
    pred_cov = dict(zip(['Covid', 'Lung Opacity', 'normal', 'Viral Pneumonia'], pred_covid))
    del pred_cov['normal']
    covid.explain()
    # Disabled multi-label model (kept as a string literal so it is not run).
    """
    pred_multiple = multiple.predict(img_path)
    pred_mult = dict(zip(['Cardiomegaly', 'Hernia', 'Infiltration', 'Nodule', 'Emphysema', 'Effusion',
                          'Atelectasis', 'Pleural Thickening', 'Pneumothorax', 'Mass', 'Fibrosis',
                          'Consolidation', 'Edema', 'Pneumonia'], pred_multiple))
    #del pred_mult['Pneumonia']
    multiple.explain()
    """
    return render_template('result.html',
                           pred_tb=pred_tb,
                           path_tb=os.path.join('static', 'explain', 'explain_tb.png'),
                           path_to_orig=path_to_orig,
                           pred_cancer=pred_cancr,
                           path_can=os.path.join('static', 'explain', 'explain_can.png'),
                           pred_cov=pred_cov,
                           path_cov=os.path.join('static', 'explain', 'explain_cov.png'),
                           # pred_mult=pred_mult,
                           # path_mult=os.path.join('static', 'explain', 'explain_mult.png'),
                           )
if __name__ == '__main__':
# from werkzeug.serving import run_simple
# run_simple('localhost', 5000, app)
app.run(debug=False)
| 2.453125 | 2 |
mysite/main/age_gender_predict/age_gender_prediction.py | trinamntn08/demoAI | 0 | 12758454 | <gh_stars>0
import cv2
import numpy as np
from matplotlib import pyplot as plt
import glob
import os
from django.conf import settings
def detect_face(image_name):
    """Detect faces in an encoded image and return it as JPEG bytes with boxes drawn.

    Args:
        image_name: despite the name, this is the *encoded image buffer*
            (1-D uint8 ndarray) that ``cv2.imdecode`` expects, not a path.

    Returns:
        bytes: the half-scale image re-encoded as JPEG with a green
        rectangle around every detected face.
    """
    img = cv2.imdecode(image_name, cv2.IMREAD_UNCHANGED)
    # Downscale to 50% to speed up cascade detection.
    print('Original Dimensions : ', img.shape)
    scale_percent = 50  # percent of original size
    height = int(img.shape[0] * scale_percent / 100)
    width = int(img.shape[1] * scale_percent / 100)
    img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
    # Haar cascades operate on grayscale input.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # HACK: hard-coded absolute Windows path -- should come from Django
    # settings; kept to preserve behaviour.  The unused eye cascade load
    # was removed.
    facecascade = cv2.CascadeClassifier('D:\\learning\\web\\demoAI\\mysite\\main\\age_gender_predict\\haarcascade_frontalface_default.xml')
    faces = facecascade.detectMultiScale(img_gray, scaleFactor=1.2, minNeighbors=5)
    print('nbr of faces:', len(faces))
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Bug fix: ndarray.tostring() is deprecated (removed in NumPy 2.0);
    # tobytes() returns the identical byte payload.
    return cv2.imencode('.jpg', img)[1].tobytes()
netharn/metrics/sklearn_alts.py | VIAME/netharn | 38 | 12758455 | """
DEPRECATED
USE kwcoco.metrics instead!
Faster pure-python versions of sklearn functions that avoid expensive checks
and label rectifications. It is assumed that all labels are consecutive
non-negative integers.
"""
from scipy.sparse import coo_matrix
import numpy as np
def confusion_matrix(y_true, y_pred, n_labels=None, labels=None,
                     sample_weight=None):
    """
    Faster version of the sklearn confusion matrix that avoids the
    expensive checks and label rectification.

    Labels are assumed to be consecutive non-negative integers.

    Args:
        y_true (ndarray): ground-truth integer labels.
        y_pred (ndarray): predicted integer labels.
        n_labels (int, optional): number of distinct labels; inferred from
            ``len(labels)`` when omitted.
        labels (sequence, optional): label values; only its length is used.
        sample_weight (ndarray, optional): per-sample weights, default 1.

    Returns:
        ndarray: matrix where rows represent real and cols represent pred

    Example:
        >>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 0, 0, 1])
        >>> y_pred = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1])
        >>> confusion_matrix(y_true, y_pred, 2)
        array([[4, 2],
               [3, 1]])
        >>> confusion_matrix(y_true, y_pred, 2).ravel()
        array([4, 2, 3, 1])
    """
    if sample_weight is None:
        # Bug fix: the ``np.int`` alias was removed in NumPy 1.24; use a
        # concrete dtype so this works on modern NumPy.
        sample_weight = np.ones(len(y_true), dtype=np.int64)
    if n_labels is None:
        n_labels = len(labels)
    # coo_matrix sums duplicate (row, col) entries, which is exactly the
    # per-cell accumulation a confusion matrix needs.
    CM = coo_matrix((sample_weight, (y_true, y_pred)),
                    shape=(n_labels, n_labels),
                    dtype=np.int64).toarray()
    return CM
def global_accuracy_from_confusion(cfsn):
    """Overall accuracy: trace of the confusion matrix over its grand total.

    Rows of ``cfsn`` are real labels, columns are predictions.
    """
    correct = np.diag(cfsn).sum()
    # Summing row totals (over the prediction axis) gives the sample count.
    total = cfsn.sum(axis=1).sum()
    return correct / total
def class_accuracy_from_confusion(cfsn):
    """Mean per-class accuracy over true classes.

    Rows of ``cfsn`` are real labels, columns are predictions.  A class with
    no true samples yields a 0/0 = NaN ratio; such classes count as 0
    accuracy instead of poisoning the mean.
    """
    # real is rows, pred is columns
    n_ii = np.diag(cfsn)
    # sum over pred = columns = axis1  -> true samples per class
    t_i = cfsn.sum(axis=1)
    # Bug fix: the original called ``.mean()`` on the ratios *before*
    # nan_to_num, so one empty class turned the mean into NaN and the whole
    # metric into 0.  Sanitise the per-class vector first, then average.
    with np.errstate(divide='ignore', invalid='ignore'):
        per_class_acc = n_ii / t_i
    class_acc = np.nan_to_num(per_class_acc).mean()
    return class_acc
| 2.59375 | 3 |
Basic_stats_visualizations/Stats_Zscore_Probability_Qqplot_Tdistribution.py | kunalk3/Machine_Learning_using_Python | 0 | 12758456 | <gh_stars>0
#---------------------------------------------------------------------
# File Name : Association_apriori.py
# Author : <NAME>.
# Description : Implementing Stats with Z score, prob, t distribution (basics)
# Date: : 5 Nov. 2020
# Version : V1.0
# Ref No : DS_Code_P_K07
#---------------------------------------------------------------------
# Importing necessary libraries
import pandas as pd

# NOTE: the bare expressions below (e.g. ``mba['gmat'].mean()``) only display
# a value in an interactive session (REPL / Spyder); run as a script they are
# computed and discarded.  Kept as-is because this is a teaching script.

# importing data set using pandas
mba = pd.read_csv("mba.csv")

# Finding mean, median, mode
mba['gmat'].mean() # mba.gmat.mean()
mba['gmat'].median()
mba['gmat'].mode()
mba['gmat'].var()
mba['gmat'].std()

# variance & Standard Deviation for Sample (pandas default ddof=1)
mba['gmat'].var() # 860
mba['gmat'].std() # 29.39

# Variance & Standard Deviation for Population (numpy default ddof=0)
import numpy as np
np.var(mba['gmat']) # 859.70
np.std(mba['gmat']) # 29.32

# Calculating the range value.  Bug fix: renamed from ``range`` so the
# Python builtin is no longer shadowed.
gmat_range = max(mba['gmat']) - min(mba['gmat']) # max(mba.gmat)-min(mba.gmat)
gmat_range

# calculating the population standard deviation and variance
np.var(mba.gmat) # population variance
np.std(mba.gmat) # population standard deviation

import scipy.stats as stats
# ppf => Percent point function (inverse CDF); similar to qnorm in R
stats.norm.ppf(0.975, 0, 1)
# cdf => cumulative distributive function; similar to pnorm in R
stats.norm.cdf(740, 711, 29)
help(stats.norm.cdf)

# Q-Q plot
import pylab
import scipy.stats as st
# Checking whether data is normally distributed
stats.probplot(mba['gmat'], dist="norm", plot=pylab)
stats.probplot(mba.workex, dist="norm", plot=pylab)
mtcars = pd.read_csv("mtcars.csv")
st.probplot(mtcars.mpg, dist="norm", plot=pylab)
help(st.probplot)

# t distribution
# Finding qnorm, qt for 90%, 95%, 99% confidence level
stats.norm.ppf(0.975, 0, 1) # similar to qnorm in R
stats.norm.ppf(0.995, 0, 1)
stats.norm.ppf(0.950, 0, 1)
stats.t.ppf(0.975, 139) # similar to qt in R
stats.t.ppf(0.995, 139)
stats.t.ppf(0.950, 139)
help(stats.t.ppf)
| 2.90625 | 3 |
bot/cogs/Eval/__init__.py | abindent/Utility-Bot | 2 | 12758457 | import nextcord, asyncio, os, io, contextlib
from nextcord.ext import commands
from nextcord.ui import Modal, TextInput
from util.messages import DeleteMessageSlash
from util.constants import Client
class SnekBox_Eval(nextcord.ui.Modal):
    """Modal that collects Python source from a Discord user, executes it in
    the bot process, and replies with the captured stdout.

    SECURITY WARNING: ``exec`` in ``callback`` runs arbitrary user-supplied
    code with the bot's full privileges; the token check only redacts the
    *output* after the code has already executed.  Despite the class name,
    nothing here is sandboxed in a real snekbox service -- review before
    exposing publicly.
    """
    def __init__(self) -> None:
        super().__init__(title="Evaluate Your Code", custom_id="evaluate_code")
        # Single multi-line input field holding the code to evaluate.
        self.add_item(
            nextcord.ui.TextInput(
                label="Your Eval Code",
                placeholder="print('Hello')",
                custom_id="evaluated code",
                style=nextcord.TextInputStyle.paragraph,
                min_length=10
            ),
        )
    async def callback(self, inter: nextcord.Interaction) -> None:
        """Run the submitted code, capture its stdout, and post the result."""
        view = DeleteMessageSlash(inter)
        embed = nextcord.Embed(title="Your code", description="✅ Your eval job has been completed and the result is provided below.", color=0x00FF00)
        code = self.children[0].value
        stdout = io.StringIO()
        # Capture everything the user code prints.
        with contextlib.redirect_stdout(stdout):
            exec(code)  # SECURITY: arbitrary code execution -- see class docstring.
        res = stdout.getvalue()
        # Redact output that would leak the bot token (post-hoc check only).
        if Client.token in res:
            res = ":warning: We can't reveal any sensitive info."
        embed.add_field(name="Input Code", value=f"```py\n{code}\n```", inline=False)
        embed.add_field(name="Evaluated Code:", value=res, inline=False)
        await inter.response.send_message(embed=embed,view=view)
    async def on_error(self, error, interaction: nextcord.Interaction):
        """Report exceptions raised by the submitted code back to the user."""
        view = DeleteMessageSlash(interaction)
        embed = nextcord.Embed(title="Code Status", description=":x: An error occurred.", color=0xFF0000)
        embed.add_field(name=":warning: The Error", value=f"```{error}```", inline=False)
        await interaction.response.send_message(embed=embed,view=view)
class Eval(commands.Cog, description='Evaluate Your Code.'):
    """Cog exposing the /eval slash command, which opens the SnekBox_Eval modal."""
    # Emoji shown next to this cog in help listings.
    COG_EMOJI = "💻"
    def __init__(self, bot):
        self.bot = bot
    @nextcord.slash_command(name="eval", description="Evaluates the given python code")
    async def eval(self, interaction: nextcord.Interaction):
        """Open the code-evaluation modal for the invoking user."""
        await interaction.response.send_modal(modal=SnekBox_Eval())
| 2.234375 | 2 |
rgbd_seg/models/heads/builder.py | tomchol/ShapeConv | 57 | 12758458 | from rgbd_seg.utils import build_from_cfg
from .registry import HEADS
def build_head(cfg, default_args=None):
    """Instantiate a segmentation head from its config via the HEADS registry."""
    return build_from_cfg(cfg, HEADS, default_args)
| 1.679688 | 2 |
mycode/modify_tif.py | Xjg-0216/DCSNet | 0 | 12758459 | <gh_stars>0
# -*- coding: utf-8 -*-
# /usr/bin/env python
'''
Author: Xjg
Email:
date: 2021/12/15 3:19 PM
desc: Rename every ``*_target*`` image in a fixed dataset directory by
      stripping the ``_target`` suffix and forcing a ``.png`` extension.
'''
import cv2
import os
from glob import glob
from tqdm import tqdm
import shutil
# NOTE(review): cv2 and shutil are only needed by the commented-out
# conversion code below; the active script only renames files.
path = '/data2/20120017/datasets/testData/train/target/'
#
# if not os.path.exists(save_path):
#     os.makedirs(save_path)
imgs_path = glob(os.path.join(path, '*_target*'))
print(len(imgs_path))
for img_path in tqdm(imgs_path):
    # img_name = img_path.split('/')[-1].replace('jpg', 'png')
    # img = cv2.imread(img_path)
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # cv2.imwrite(os.path.join(path, img_name), img)
    # shutil.move(img_path, save_path)
    # Drops the last 11 characters and appends '.png' -- assumes every match
    # ends with an 11-character suffix like '_target.tif'; TODO confirm
    # against the actual dataset naming before running.
    new_path = img_path[:-11]+'.png'
    os.rename(img_path, new_path)
kicker/app/domain/kicker_receiver.py | omBratteng/mottak | 4 | 12758460 | <gh_stars>1-10
import logging
from typing import List
from app.connectors.azure_servicebus.azure_servicebus_client import AzureQueueReceiver
from app.domain.models import KickerMessage
logger = logging.getLogger(__name__)
class KickerReceiver(AzureQueueReceiver):
    """
    Service-bus queue consumer for the KickerRequest messages sent by tusd.
    """
    def __init__(self, connection_string: str, queue_name: str):
        super().__init__(connection_string=connection_string, queue_name=queue_name)

    def receive_workflows(self, max_batch_size: int = 1) -> List[KickerMessage]:
        """
        Receives messages from the service bus queue and converts them to
        KickerMessage objects; messages that fail to parse are dropped.

        :param max_batch_size: Number of messages to process
        :return: list with KickerMessage objects
        """
        parsed = []
        for raw in self.receive_messages(max_batch_size):
            logger.info(f'Received message on queue {self.queue_name}')
            candidate = KickerMessage.from_string(raw)
            if candidate:
                parsed.append(candidate)
        return parsed
| 2.34375 | 2 |
pype/modules/rest_api/lib/exceptions.py | kalisp/pype | 0 | 12758461 | class ObjAlreadyExist(Exception):
"""Is used when is created multiple objects of same RestApi class."""
def __init__(self, cls=None, message=None):
if not (cls and message):
message = "RestApi object was created twice."
elif not message:
message = "{} object was created twice.".format(cls.__name__)
super().__init__(message)
class AbortException(Exception):
    """Exception used to signal that the current operation should be aborted."""
    pass
| 2.78125 | 3 |
gincco/methods/__init__.py | paulmorio/gincco | 7 | 12758462 | <filename>gincco/methods/__init__.py
# gincco.methods init file
| 1.210938 | 1 |
ChaosFunctions/get_stationary.py | Psicowired87/ChaosFunctions | 0 | 12758463 | <reponame>Psicowired87/ChaosFunctions
"""This module contains the tools needed to extract the stationary points of
a dynamics and to stop when it is considered enough.
"""
import numpy as np
def logistic_map_bif_diagram(range_par, stop_f):
    """Logistic map bifurcation diagram computation.

    Parameters
    ----------
    range_par: list or np.ndarray
        the parameters we want to compute.
    stop_f: function
        the stop condition.

    Returns
    -------
    sequence: np.ndarray
        array of (parameter, fixed point) pairs, shape (n, 2).

    Example
    -------
    >>> y0 = np.linspace(0, 3, 31)
    >>> y1 = np.linspace(1, 3.2, 23)
    >>> seq = logistic_map_bif_diagram(y0, stationary_fixed_points)
    >>> seq = logistic_map_bif_diagram(y1, stationary_fixed_points)
    """
    # Family of logistic maps x -> r*x*(1-x), parameterised by r.
    iter_f = lambda r: lambda x: r*x*(1-x)
    sequence = obtain_bifurcation_diagram(iter_f, range_par, stop_f)
    sequence = np.array(sequence)
    # Bug fix: use floor division -- plain ``/`` yields a float under
    # Python 3 and ``reshape`` rejects non-integer dimensions.
    sequence = sequence.reshape((sequence.reshape(-1).shape[0] // 2, 2))
    return sequence
def obtain_bifurcation_diagram(iter_f, range_par, stop_f):
    """Compute the bifurcation diagram.

    Parameters
    ----------
    iter_f: function
        parameter -> iteration function factory.
    range_par: list or np.ndarray
        the parameters we want to compute.
    stop_f: function
        the stop condition.

    Returns
    -------
    fixedp: list
        for each parameter, a list of [parameter, fixed point] pairs.
    """
    fixedp = []
    for par in range_par:
        # Bug fix: ``print par`` is Python-2-only syntax; the call form
        # behaves identically on both interpreter major versions.
        print(par)
        # Random start point in [0, 1) for this parameter's orbit.
        p0 = np.random.random()
        iter_ff = iter_f(par)
        sequence, fixed_points = generic_iteration_4_fixed_points(p0,
                                                                  iter_ff,
                                                                  stop_f)
        fixedp.append([[par, fp] for fp in fixed_points])
    return fixedp
def generic_iteration_4_fixed_points(p0, iter_f, stop_f_and_fixedp):
    """Iterate *iter_f* from *p0* until the stop criterion fires.

    Parameters
    ---------
    p0 : float
        initial point of the iteration
    iter_f: function
        maps the current state to the next state of the system.
    stop_f_and_fixedp: function
        receives the history (ndarray) and returns (done, fixed_points).

    Returns
    -------
    sequence: np.ndarray
        every visited state, in order.
    fixed_points: np.ndarray
        the fixed points reported by the stop criterion.
    """
    history = []
    fixed_points = None
    state = p0
    while True:
        history.append(state)
        # The stop criterion sees the whole trajectory so far.
        done, fixed_points = stop_f_and_fixedp(np.array(history))
        if done:
            break
        state = iter_f(state)
    return np.array(history), fixed_points
def stationary_fixed_points(history):
    """Take the decision if the point is stationary. It runs for different
    embedding orders (periods 1 .. sqrt(len(history))).

    Parameters
    ----------
    history: np.ndarray
        the sequence of visited states so far.

    Returns
    -------
    stationary: boolean
        if the sequence is in a stationary point.
    fixed_points: np.ndarray
        the fixed points (empty array when none were found).
    """
    stationary = False
    n_limit = int(np.sqrt(history.shape[0]))
    fixed_points = np.array([])
    # Give up once the history exceeds ~10000 samples (sqrt > 100): report
    # "stationary" with no fixed points so the caller's loop terminates.
    if n_limit > 100:
        return True, fixed_points
    for order in range(1, n_limit+1):
        # Embed the history with period `order` and test whether the
        # embedded trajectory has settled.
        s = embedding_matrix(history, order)
        stationary = decision_stationarity(s)
        if stationary:
            # The last embedded row holds the cycle of `order` fixed points.
            fixed_points = s[-1, :]
            break
    return stationary, fixed_points
def decision_stationarity(seq):
    """Decide whether the tail of the sequence looks stationary.

    Requires more than 100 samples; with fewer the answer is always False.
    Otherwise the standard deviation of the last 100 rows must fall below
    0.01.

    NOTE(review): np.std here is taken over the whole 100-row tail (a single
    scalar), not per column -- confirm per-axis dispersion was not intended.

    Parameters
    ----------
    seq : np.ndarray
        the sequence information.

    Returns
    -------
    decision : boolean
        whether the last state can be considered stationary.
    """
    if seq.shape[0] > 100:
        return np.all(np.std(seq[-100:, ]) < 0.01)
    return False
def embedding_matrix(seq, order):
    """Build an order-``order`` embedding of ``seq`` (lag 1, stride ``order``),
    dropping the all-zero rows left unfilled by the strided transform.

    NOTE(review): rows whose first element is genuinely 0 are discarded
    too -- confirm this is acceptable for the data at hand.
    """
    raw = sliding_embeded_transf(seq, 1, order, order)
    return raw[raw[:, 0] != 0, :]
def sliding_embeded_transf(X, tau, D, step=1, f=lambda x: x):
    """Build a set of embedding sequences from given time series X with lag Tau
    and embedding dimension D. Let X = [x(1), x(2), ... , x(N)], then for each
    i such that 1 < i < N - (D - 1) * Tau, we build an embedding sequence,
    Y(i) = [x(i), x(i + Tau), ... , x(i + (D - 1) * Tau)]. All embedding
    sequences are placed in a matrix Y.

    Parameters
    ----------
    X : array_like, shape(N,)
        a time series
    tau : int
        the lag or delay when building embedding sequence
    D : int
        the embedding dimension
    step: int
        the step for which we compute the sequence; rows between steps are
        left at zero and are expected to be filtered out by the caller.
    f: function
        transformation applied to each element of the sequence.

    Returns
    -------
    Y : np.ndarray, shape (N - (D - 1) * tau, D)
        embedding matrix built

    Raises
    ------
    ValueError
        if D * tau > N or tau < 1 (previously a bare Exception; ValueError
        is a subclass, so existing `except Exception` callers still work).
    """
    N = X.shape[0]
    # Check inputs
    if D * tau > N:
        raise ValueError("Cannot build such a matrix, because D * tau > N")
    if tau < 1:
        raise ValueError("Tau has to be at least 1")
    Y = np.zeros((N - (D - 1) * tau, D))
    # BUG FIX: xrange is Python-2 only and raises NameError on Python 3;
    # range behaves identically here.
    for i in range(0, N - (D - 1) * tau, step):
        for j in range(0, D):
            Y[i][j] = f(X[i + j * tau])
    return Y
| 2.765625 | 3 |
media_tree/contrib/media_extensions/images/focal_point/__init__.py | erlenddalen/django-media-tree | 29 | 12758464 | """
focal_point
===========
The *focal_point* extension allows you to drag a marker on image thumbnails
while editing, thus specifying the most relevant portion of the image. You can
then use these coordinates in templates for image cropping.
- To install it, add the extension module to your ``INSTALLED_APPS`` setting::
INSTALLED_APPS = (
# ... your apps here ...
'media_tree.contrib.media_extensions.images.focal_point'
)
- If you are not using ``django.contrib.staticfiles``, copy the contents of the
``static`` folder to the static root of your project. If you are using the
``staticfiles`` app, just run the usual command to collect static files::
$ ./manage.py collectstatic
.. Note::
This extension adds the fields ``focal_x`` and ``focal_y`` to
the ``FileNode`` model. You are going to have to add these fields to
the database table yourself by modifying the ``media_tree_filenode`` table
with a database client, **unless you installed it before running**
    ``syncdb``.
"""
| 2.140625 | 2 |
setup.py | liuzhuoling2011/music-dl | 18 | 12758465 | <gh_stars>10-100
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: HJK
@file: setup.py
@time: 2019-01-26
    Packaging configuration file.
"""

import os
import sys

import setuptools

# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
    # Rebuild the distributions from scratch and upload them to PyPI.
    os.system('rm -rf dist')
    os.system('python setup.py sdist bdist_wheel')
    os.system('twine upload dist/*')
    sys.exit()

here = os.path.abspath(os.path.dirname(__file__))

# Load package metadata (__title__, __version__, ...) from __version__.py
# without importing the package itself.
about = {}
with open(os.path.join(here, 'music_dl', '__version__.py'), 'r', encoding='utf-8') as f:
    exec(f.read(), about)

# The long description shown on PyPI comes straight from the README.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__description__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    url=about['__url__'],
    license=about['__license__'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    test_suite = 'tests',
    data_files = [("", ["LICENSE", "README.en.md"])],
    # Console entry point: installs the `music-dl` command.
    entry_points={
        'console_scripts': [
            'music-dl = music_dl.__main__:main',
        ],
    },
    install_requires=[
        'requests',
        'click',
        'pycryptodome',
        'prettytable',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Multimedia',
        'Topic :: Multimedia :: Sound/Audio',
        'Topic :: Utilities'
    ],
)
df.py | gbanegas/KissECC | 1 | 12758466 | <reponame>gbanegas/KissECC
from ecc import EC
class DiffieHellman(object):
    """Elliptic Curve Diffie-Hellman (key agreement).

    - ec: elliptic curve
    - g: a base point on ec
    """

    def __init__(self, ec, g):
        self.ec = ec
        self.g = g
        # Order of the base point: the size of the subgroup generated by g.
        self.n = ec.order(g)

    def gen(self, priv):
        """Derive the public key for private scalar ``priv`` (0 < priv < n)."""
        assert 0 < priv and priv < self.n
        return self.ec.mul(self.g, priv)

    def secret(self, priv, pub):
        """calc shared secret key for the pair

        - priv: my private key as int
        - pub: partner pub key as a point on ec
        - returns: shared secret as a point on ec
        """
        # Validate that pub lies on the curve and in the subgroup of order n
        # (guards against invalid-curve / small-subgroup inputs).
        assert self.ec.is_valid(pub)
        assert self.ec.mul(pub, self.n) == self.ec.zero
        return self.ec.mul(pub, priv)
| 3.328125 | 3 |
py_celery/main.py | shimakaze-git/docker_django_celery_vote | 0 | 12758467 | <filename>py_celery/main.py
import tasks
print('<first task>')

# Launch the task here (the `run` task).
worker = tasks.run.delay()

# Busy-wait until the task finishes.
# NOTE(review): worker.get() would block without spinning the CPU.
while not worker.ready():
    pass

# Print the returned value.
print(worker.result)

print('<second task>')

# Launch the task here (the `calc` task with arguments 100 and 200).
worker = tasks.calc.delay(100, 200)

# Busy-wait until the task finishes.
while not worker.ready():
    pass

# Print the returned value.
print(worker.result)
| 3.0625 | 3 |
geometry/transform2.py | Jack12xl/a-toy-fluid-engine | 21 | 12758468 | import taichi as ti
import taichi_glsl as ts
import math
from utils import Vector, Matrix, tiNormalize, Float
from config.base_cfg import error
## unity gameobject.transform
# ref: https://github.com/JYLeeLYJ/Fluid-Engine-Dev-on-Taichi/blob/master/src/python/geometry.py
@ti.data_oriented
class Transform2:
    """2D transform (translation + rotation + scale), after Unity's
    GameObject.transform.

    State lives in 0-dim taichi fields. Constructor arguments are buffered
    and only written into the fields by kern_materialize() -- presumably
    because taichi fields cannot be written before materialization
    (TODO confirm against the taichi version in use).
    """
    def __init__(self,
                 translation=ti.Vector([0.0, 0.0]),
                 orientation=0.0,
                 localscale=1.0):
        # Backing 0-dim fields for the three transform components.
        self._translation = ti.Vector.field(2, dtype=ti.f32, shape=[])
        self._orientation = ti.field(dtype=ti.f32, shape=[])
        self._localScale = ti.Vector.field(2, dtype=ti.f32, shape=[])
        # use buffer for later materialization
        self.translation_buf = translation
        # Orientation is normalised to [0, 2*pi).
        self.orientation_buf = orientation % (2 * math.pi)
        self.localscale_buf = localscale

    def __repr__(self):
        return '{} ( Trsln : {}, Ornttn: {}, lclScl: {})'.format(
            self.__class__.__name__,
            self.translation,
            self.orientation,
            self.localScale)

    @ti.pyfunc
    def kern_materialize(self):
        # Flush the buffered constructor values into the taichi fields.
        self._translation[None] = self.translation_buf
        self._orientation[None] = self.orientation_buf
        self.localScale = self.localscale_buf

    @property
    @ti.pyfunc
    def translation(self) -> Vector:
        return self._translation[None]

    @translation.setter
    def translation(self, translation: ti.Vector):
        self._translation[None] = translation

    # @property
    # def orientation(self) -> Float:
    #     return self._orientation[None]

    @property
    @ti.pyfunc
    def orientation(self) -> Float:
        return self._orientation[None]

    @orientation.setter
    def orientation(self, orientation: Float):
        # Keep the stored angle normalised to [0, 2*pi).
        self._orientation[None] = orientation % (2 * math.pi)

    # @property
    # def localScale(self) -> Float:
    #     return self._localScale[None]

    @property
    @ti.pyfunc
    def localScale(self) -> Vector:
        return self._localScale[None]

    @localScale.setter
    def localScale(self, localScale: Vector):
        # clamp above zero (avoids division by zero in to_local)
        self._localScale[None] = ti.max(ts.vec2(localScale), ts.vec2(error))

    @ti.pyfunc
    def to_local(self, p_world: Vector) -> Vector:
        # World -> local: inverse transform, applied in reverse order.
        # translate
        out = p_world - self.translation
        # rotate back
        out = apply_rot(-self.orientation, out)
        # scale
        out /= self.localScale
        return out

    @ti.func
    def to_world(self, p_local: Vector) -> Vector:
        # Local -> world: scale, then rotate, then translate.
        # scale
        out = p_local * self.localScale
        # rotate
        out = apply_rot(self.orientation, out)
        # translate
        out += self.translation
        return out

    @ti.func
    def dir_2world(self, dir_local: Vector) -> Vector:
        # Directions only rotate (no translation/scale) and stay unit length.
        out = apply_rot(self.orientation, dir_local)
        return tiNormalize(out)
@ti.func
def getRotMat2D(rotation) -> Matrix:
    # 2x2 counter-clockwise rotation matrix for angle `rotation` (radians).
    return ti.Matrix([[ti.cos(rotation), -ti.sin(rotation)], [ti.sin(rotation), ti.cos(rotation)]])


@ti.pyfunc
def apply_rot(rot, p) -> Vector:
    # Rotate 2D point `p` counter-clockwise by `rot` radians
    # (explicit form of getRotMat2D(rot) @ p).
    cos = ti.cos(rot)
    sin = ti.sin(rot)
    return ti.Vector([cos * p[0] - sin * p[1], sin * p[0] + cos * p[1]])
@ti.kernel
def test_rotate():
    # Smoke test: mapping a point into local space and back should return
    # the original point.
    # a._orientation[None] = ti.static(math.pi / 2)
    a.orientation = math.pi / 2
    b = ti.Vector([0, 1])
    # print(apply_rot(2.0, b))
    c = a.to_local(b)
    d = a.to_world(c)
    # should be the same
    print("world b: ", b)
    print("world d: ", d)


if __name__ == '__main__':
    # Manual exercise of Transform2 on the CPU backend with bounds checks.
    ti.init(ti.cpu, debug=True)
    a = Transform2(ti.Vector([2.0, 4.0]), 15)
    a.kern_materialize()
    a.orientation = 100.0
    a.localScale = 2.0
    a.translation = ti.Vector([5.0, 2.0])
    t = a.orientation
    print(a.to_local(ti.Vector([2.0, 2.0])))
    # print(a.translation)
    # print(a.orientation)
    # print(a.localScale)
    #
    # print(a._translation[None])
    # print(a._orientation[None])
    # print(a._localScale[None])
    # test_rotate()
| 2.28125 | 2 |
Email Sender/email.py | Arbazkhan4712/Python---Programs | 1 | 12758469 | <gh_stars>1-10
import smtplib
# Prompt the operator for the recipient and the message body.
to = input("Enter the recivers email id : \n")
content = input("Enter the content to send : \n")


def sendEmail(to, content):
    """Send ``content`` to ``to`` through Gmail's SMTP relay.

    SECURITY NOTE(review): the login credentials are hard-coded placeholders
    here -- real secrets must come from the environment or a secrets store,
    never from source code.
    """
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    # Upgrade the plaintext connection to TLS before authenticating.
    server.starttls()
    server.login('<EMAIL>', 'Password')
    server.sendmail('<EMAIL>', to, content)
    server.close()


sendEmail(to, content)
webapp/test.py | a4242762/Novel-recommendation-system | 2 | 12758470 | from math import ceil
a = 1
b = 2
# True division: prints 0.5 (not integer division).
print(a/b)
# math.ceil rounds up: prints 2.
print(ceil(1.6))
generator.py | PetarPeychev/drunk-caves | 1 | 12758471 | import random
def generate(width, height, percentage):
    """Carve a cave map with a drunkard's-walk.

    A ``width`` x ``height`` grid starts filled with walls (1). A walker
    starts at a random interior cell and stumbles one axis-aligned step at
    a time, carving floor cells (0), until more than ``percentage`` percent
    of all cells are floor.

    Parameters
    ----------
    width, height : int
        grid dimensions (must be >= 3 so an interior exists).
    percentage : float
        target floor percentage (0-100).

    Returns
    -------
    list[list[int]]
        ``grid[x][y]``: 1 = wall, 0 = floor. The one-cell outer border
        always remains wall.
    """
    # Renamed from `map`, which shadowed the builtin of the same name.
    grid = [[1 for i in range(height)] for j in range(width)]
    # Walkable interior: keep a one-cell wall border around the map.
    min_x = 1
    max_x = width - 2
    min_y = 1
    max_y = height - 2
    x = random.randint(min_x, max_x)
    y = random.randint(min_y, max_y)
    map_cells = width * height
    filled_cells = 0
    filled_percentage = 0
    previous_delta_x = 0
    previous_delta_y = 0
    while filled_percentage <= percentage:
        if grid[x][y] == 1:
            grid[x][y] = 0
            filled_cells += 1
            filled_percentage = filled_cells / map_cells * 100
        # Move along one random axis. Offering the previous delta again
        # biases the walk into straighter corridors; stepping outside the
        # interior bounces the walker back and reverses the bias.
        if random.choice([True, False]):
            delta_x = random.choice([1, -1, previous_delta_x])
            if x + delta_x < min_x or x + delta_x > max_x:
                x = x - delta_x
                previous_delta_x = -delta_x
            else:
                x = x + delta_x
                previous_delta_x = delta_x
        else:
            delta_y = random.choice([1, -1, previous_delta_y])
            if y + delta_y < min_y or y + delta_y > max_y:
                y = y - delta_y
                previous_delta_y = -delta_y
            else:
                y = y + delta_y
                previous_delta_y = delta_y
    return grid
| 3.40625 | 3 |
niftynet/application/base_application.py | amh28/NIF | 0 | 12758472 | <reponame>amh28/NIF<gh_stars>0
# -*- coding: utf-8 -*-
"""
Interface of NiftyNet application
"""
import tensorflow as tf
from six import with_metaclass
from niftynet.layer.base_layer import TrainableLayer
from niftynet.utilities import util_common
class SingletonApplication(type):
    """Metaclass caching the first instance of each application class.

    Later instantiations return the cached object instead of building a
    new one. (A commented-out earlier revision raised RuntimeError instead;
    the permissive behaviour is kept.)
    """

    _instances = None

    def __call__(cls, *args, **kwargs):
        # First call constructs and caches on `cls`; later calls reuse it.
        if cls._instances is not None:
            return cls._instances
        cls._instances = super(SingletonApplication, cls).__call__(*args, **kwargs)
        return cls._instances
class BaseApplication(with_metaclass(SingletonApplication, object)):
    """
    BaseApplication represents an interface.
    Each application type_str should support to use
    the standard training and inference driver.
    """
    # defines name of the customised configuration file section
    # the section collects all application specific user parameters
    REQUIRED_CONFIG_SECTION = None

    # boolean flag
    is_training = True
    # TF placeholders for switching network on the fly
    is_validation = None

    # input of the network
    readers = None
    sampler = None

    # the network
    net = None

    # training the network
    optimiser = None
    gradient_op = None

    # interpret network output
    output_decoder = None

    # NOTE: a stray debug print at class-definition time was removed here;
    # it ran on import and polluted stdout.

    def check_initialisations(self):
        """Raise if any driver-facing member was left uninitialised."""
        if self.readers is None:
            raise NotImplementedError('reader should be initialised')
        if self.sampler is None:
            raise NotImplementedError('sampler should be initialised')
        if self.net is None:
            raise NotImplementedError('net should be initialised')
        if not isinstance(self.net, TrainableLayer):
            raise ValueError('self.net should be an instance'
                             ' of niftynet.layer.TrainableLayer')
        if self.optimiser is None and self.is_training:
            raise NotImplementedError('optimiser should be initialised')
        if self.gradient_op is None and self.is_training:
            raise NotImplementedError('gradient_op should be initialised')
        if self.output_decoder is None and not self.is_training:
            raise NotImplementedError('output decoder should be initialised')

    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        """
        this function initialise self.readers

        :param data_param: input modality specifications
        :param task_param: contains task keywords for grouping data_param
        :param data_partitioner:
            specifies train/valid/infer splitting if needed
        :return:
        """
        raise NotImplementedError

    def initialise_sampler(self):
        """
        set samplers take self.reader as input and generates
        sequences of ImageWindow that will be fed to the networks

        This function sets self.sampler
        """
        raise NotImplementedError

    def initialise_network(self):
        """
        This function create an instance of network

        sets self.net

        :return: None
        """
        raise NotImplementedError

    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        """
        adding sampler output tensor and network tensors to the graph.

        :param outputs_collector:
        :param gradients_collector:
        :return:
        """
        raise NotImplementedError

    def interpret_output(self, batch_output):
        """
        implement output interpretations, e.g., save to hard drive
        cache output windows

        :param batch_output: outputs by running the tf graph
        :return: True indicates the driver should continue the loop
            False indicates the drive should stop
        """
        raise NotImplementedError

    def set_network_gradient_op(self, gradients):
        """
        create gradient op by optimiser.apply_gradients
        this function sets self.gradient_op

        Override this function for more complex optimisations such as
        using different optimisers for sub-networks.

        :param gradients: processed gradients from the gradient_collector
        :return:
        """
        # BUG FIX: removed two leftover debug print statements
        # ("EEEE..." / "GGGG...") from this method.
        grad_list_depth = util_common.list_depth_count(gradients)
        if grad_list_depth == 3:
            # nested depth 3 means: gradients list is nested in terms of:
            # list of networks -> list of network variables
            self.gradient_op = [self.optimiser.apply_gradients(grad)
                                for grad in gradients]
        elif grad_list_depth == 2:
            # nested depth 2 means:
            # gradients list is a list of variables
            self.gradient_op = self.optimiser.apply_gradients(gradients)
        else:
            raise NotImplementedError(
                'This app supports updating a network, or a list of networks.')

    def stop(self):
        """Close every sampler so background queues/threads shut down."""
        for sampler_set in self.get_sampler():
            for sampler in sampler_set:
                if sampler:
                    sampler.close_all()

    def set_iteration_update(self, iteration_message):
        """
        At each iteration `application_driver` calls
        `output = tf.session.run(variables_to_eval, feed_dict=data_dict)`
        to evaluate TF graph elements, where
        `variables_to_eval` and `data_dict` are retrieved from
        `application_iteration.IterationMessage.ops_to_run` and
        `application_iteration.IterationMessage.data_feed_dict`.
        (in addition to the variables collected by output_collector;
        see `application_driver.run_vars`)

        This function (is called before `tf.session.run` by the
        driver) provides an interface for accessing `variables_to_eval` and
        `data_dict` at each iteration.

        Override this function for more complex operations according to
        `application_iteration.IterationMessage.current_iter`.
        """
        # Flip the train/validation placeholder according to the phase.
        if iteration_message.is_training:
            iteration_message.data_feed_dict[self.is_validation] = False
        elif iteration_message.is_validation:
            iteration_message.data_feed_dict[self.is_validation] = True

    def get_sampler(self):
        """Return the sampler set(s) created by initialise_sampler()."""
        return self.sampler

    def add_validation_flag(self):
        """
        add a TF placeholder for switching between train/valid graphs

        :return:
        """
        self.is_validation = \
            tf.placeholder_with_default(False, [], 'is_validation')
| 2.03125 | 2 |
tests/unitary.py | cerealkill/pandapower_api | 1 | 12758473 | import unittest
from api.controllers.simulation import SimulationController
from api.server import rest
class SimulationControllerTest(unittest.TestCase):
    """Unit tests for SimulationController's run/result accessors."""

    def setUp(self):
        # Fresh controller per test so state cannot leak between tests.
        self.controller = SimulationController()

    def test_get_active_load_fails(self):
        # Reading a result before any simulation ran must raise.
        with self.assertRaises(Exception):
            self.controller.active_load

    def test_get_reactive_load_fails(self):
        with self.assertRaises(Exception):
            self.controller.reactive_load

    def test_run_simulation(self):
        active_load, reactive_load = self.controller.run_simulation()
        self.assertEqual(active_load, 0.1)
        self.assertEqual(reactive_load, 0.05)

    def test_get_active_load(self):
        self.controller.run_simulation()
        self.assertEqual(self.controller.active_load, 0.1)

    def test_get_reactive_load(self):
        self.controller.run_simulation()
        self.assertEqual(self.controller.reactive_load, 0.05)
class RestAPIv1Test(unittest.TestCase):
    """Integration-style tests against the Flask REST API (v1)."""

    def setUp(self):
        # Use Flask's test client so no real server is started.
        rest.config['TESTING'] = True
        self.app = rest.test_client()

    def test_get_active_load(self):
        self.app.post('/api/v1/run')
        simulation_res = self.app.get('/api/v1/simulation/0/load/active')
        self.assertEqual(simulation_res.status_code, 200)
        self.assertEqual(simulation_res.json, {'value': 0.1})

    def test_get_reactive_load(self):
        self.app.post('/api/v1/run')
        simulation_res = self.app.get('/api/v1/simulation/0/load/reactive')
        self.assertEqual(simulation_res.status_code, 200)
        self.assertEqual(simulation_res.json, {'value': 0.05})

    def test_get_simulation_by_id(self):
        simulation_res = self.app.get('/api/v1/simulation/0')
        self.assertEqual(simulation_res.status_code, 200)
        self.assertEqual(simulation_res.json, {'id': 0, 'results': {'load': {'active': 0.1, 'reactive': 0.05}}})

    def test_get_simulations_list(self):
        simulation_res = self.app.get('/api/v1/simulations')
        self.assertEqual(simulation_res.status_code, 200)
        self.assertEqual(simulation_res.json, {'0': {'id': 0, 'results': {'load': {'active': 0.1, 'reactive': 0.05}}}})

    def test_run_simulation(self):
        simulation_res = self.app.post('/api/v1/simulations')
        self.assertEqual(simulation_res.status_code, 201)
        self.assertEqual(simulation_res.json, {'id': '10', 'results': {'load': {'active': 0.1, 'reactive': 0.05}}})

    def test_run_simulation_raises(self):
        # Out-of-range load values are expected to yield 417.
        simulation_res = self.app.post('/api/v1/simulations', data=dict(active=0.9, reactive=0.8))
        self.assertEqual(simulation_res.status_code, 417)

    def test_put_simulation_replace(self):
        # Second PUT on the same id replaces the stored simulation.
        self.app.put('/api/v1/simulation/9', data=dict(active=0.4, reactive=0.01))
        simulation_res = self.app.put('/api/v1/simulation/9', data=dict(active=0.2, reactive=0.02))
        self.assertEqual(simulation_res.status_code, 201)
        self.assertEqual(simulation_res.json, {'id': '9', 'results': {'load': {'active': 0.2, 'reactive': 0.02}}})

    def test_put_simulation_new(self):
        simulation_res = self.app.put('/api/v1/simulation/8', data=dict(active=0.2, reactive=0.02))
        self.assertEqual(simulation_res.status_code, 201)

    def test_put_simulation_raises(self):
        simulation_res = self.app.put('/api/v1/simulation/1', data=dict(active=0.9, reactive=0.8))
        self.assertEqual(simulation_res.status_code, 417)

    def test_delete_simulation(self):
        self.app.put('/api/v1/simulation/5', data=dict(active=0.2, reactive=0.02))
        simulation_res = self.app.delete('/api/v1/simulation/5')
        self.assertEqual(simulation_res.status_code, 204)
| 2.828125 | 3 |
test_nfp.py | chendu2017/irregular_packing | 3 | 12758474 | <filename>test_nfp.py
# -*- coding: utf-8 -*-
from nfp_function import Nester, content_loop_rate
from settings import BIN_WIDTH, BIN_NORMAL, BIN_CUT_BIG, LOOP_TIME
import ast
import pandas as pd
# Load the part table; the '外轮廓' column holds outer contours as Python
# literals, '零件号' holds part numbers.
lingjian = pd.read_csv('.\L0002_lingjian.csv')

if __name__ == '__main__':
    n = Nester()
    # Parse each contour string into a list of [x, y] points.
    s = [ast.literal_eval(contour) for contour in lingjian['外轮廓']]
    n.add_objects(
        #[ [ [0,0],[0,20],[20,0] ],
        # [ [20,0],[20,10],[30,10],[30,0] ],
        # [[10,0],[20,0],[20,10],[10,10]]
        # ]
        #[
        #[[10,0],[20,0],[20,10],[10,10]],
        #[[10,20],[20,20],[15,30]],
        #[[30,10],[50,10],[35,15],[40,30],[30,30]]
        #]
        s[:50]#,lingjian['零件号'].values
    )
    # Widen the bin if any shape is longer than the default bin width.
    if n.shapes_max_length > BIN_WIDTH:
        BIN_NORMAL[2][0] = n.shapes_max_length
        BIN_NORMAL[3][0] = n.shapes_max_length

    # Select the container (bin) sheet.
    n.add_container(BIN_NORMAL)
    # Run the computation (one pass, without generating offspring).
    n.run()

    # Exit-condition setup.
    res_list = list()
    best = n.best
    # Place within a single container.
    # set_target_loop(best, n) # T6
    # Iterate a fixed number of times.
    content_loop_rate(best, n, loop_time=LOOP_TIME-1) # T7 , T4
| 2.359375 | 2 |
packages/simcore-sdk/tests/integration/test_node_data_data_manager.py | elisabettai/osparc-simcore | 0 | 12758475 | <reponame>elisabettai/osparc-simcore
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=too-many-arguments
import hashlib
import os
import shutil
from pathlib import Path
from typing import Callable, Set, Tuple
from uuid import uuid4

import pytest

from simcore_sdk.node_data import data_manager
pytest_simcore_core_services_selection = [
"migration",
"postgres",
"storage",
]
pytest_simcore_ops_services_selection = ["minio", "adminer"]
# UTILS
def _remove_file_or_folder(file_or_folder: Path) -> None:
if file_or_folder.is_file():
file_or_folder.unlink()
assert file_or_folder.exists() is False
file_or_folder.touch()
assert file_or_folder.exists() is True
else:
os.system(f"rm -rf {file_or_folder}")
assert file_or_folder.exists() is False
file_or_folder.mkdir(parents=True, exist_ok=True)
assert file_or_folder.exists() is True
def _get_file_hashes_in_path(path_to_hash: Path) -> Set[Tuple[Path, str]]:
def _hash_path(path: Path):
sha256_hash = hashlib.sha256()
with open(path, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def _relative_path(root_path: Path, full_path: Path) -> Path:
return full_path.relative_to(root_path)
if path_to_hash.is_file():
return {(_relative_path(path_to_hash, path_to_hash), _hash_path(path_to_hash))}
return {
(_relative_path(path_to_hash, path), _hash_path(path))
for path in path_to_hash.rglob("*")
}
def _make_file_with_content(file_path: Path) -> Path:
content = " ".join(f"{uuid4()}" for x in range(10))
file_path.write_text(content)
assert file_path.exists()
return file_path
def _make_dir_with_files(temp_dir: Path, file_count: int) -> Path:
    """Create a uniquely named subdirectory of ``temp_dir`` holding
    ``file_count`` random-content files, and return it."""
    assert file_count > 0
    target = temp_dir / f"content_dir{uuid4()}"
    target.mkdir(parents=True, exist_ok=True)
    for _ in range(file_count):
        _make_file_with_content(file_path=target / f"{uuid4()}_test.txt")
    return target
# FIXTURES
@pytest.fixture
def node_uuid() -> str:
    # Fresh node id per test.
    return f"{uuid4()}"


@pytest.fixture
def temp_dir(tmpdir: Path) -> Path:
    # Wrap pytest's legacy tmpdir in a pathlib.Path.
    return Path(tmpdir)


@pytest.fixture
def random_tmp_dir_generator(temp_dir: Path) -> Callable[[bool], Path]:
    # Factory returning fresh random destinations (file or directory).
    def generator(is_file: bool) -> Path:
        random_dir_path = temp_dir / f"{uuid4()}"
        random_dir_path.mkdir(parents=True, exist_ok=True)
        if is_file:
            file_path = random_dir_path / f"{uuid4()}_test.txt"
            file_path.touch()
            return file_path
        return random_dir_path

    return generator


@pytest.fixture
def file_content_path(temp_dir: Path) -> Path:
    # Single file with random content.
    return _make_file_with_content(file_path=temp_dir / f"{uuid4()}_test.txt")


@pytest.fixture
def dir_content_one_file_path(temp_dir: Path) -> Path:
    # Directory containing exactly one random file.
    return _make_dir_with_files(temp_dir, file_count=1)


@pytest.fixture
def dir_content_multiple_files_path(temp_dir: Path) -> Path:
    # Directory containing two random files.
    return _make_dir_with_files(temp_dir, file_count=2)
@pytest.mark.parametrize(
    "content_path",
    [
        # pylint: disable=no-member
        pytest.lazy_fixture("file_content_path"),
        pytest.lazy_fixture("dir_content_one_file_path"),
        pytest.lazy_fixture("dir_content_multiple_files_path"),
    ],
)
async def test_valid_upload_download(
    filemanager_cfg: None,
    content_path: Path,
    user_id: int,
    project_id: str,
    node_uuid: str,
):
    # Round-trip: push local content to storage, wipe it locally, pull it
    # back, and verify every file hash survived the trip.
    await data_manager.push(
        user_id=user_id,
        project_id=project_id,
        node_uuid=node_uuid,
        file_or_folder=content_path,
    )
    uploaded_hashes = _get_file_hashes_in_path(content_path)
    _remove_file_or_folder(content_path)
    await data_manager.pull(
        user_id=user_id,
        project_id=project_id,
        node_uuid=node_uuid,
        file_or_folder=content_path,
    )
    downloaded_hashes = _get_file_hashes_in_path(content_path)
    assert uploaded_hashes == downloaded_hashes


@pytest.mark.parametrize(
    "content_path",
    [
        # pylint: disable=no-member
        pytest.lazy_fixture("file_content_path"),
        pytest.lazy_fixture("dir_content_one_file_path"),
        pytest.lazy_fixture("dir_content_multiple_files_path"),
    ],
)
async def test_valid_upload_download_saved_to(
    filemanager_cfg: None,
    content_path: Path,
    user_id: int,
    project_id: str,
    node_uuid: str,
    random_tmp_dir_generator: Callable,
):
    # Same round-trip, but pull into a fresh destination via save_to.
    await data_manager.push(
        user_id=user_id,
        project_id=project_id,
        node_uuid=node_uuid,
        file_or_folder=content_path,
    )
    uploaded_hashes = _get_file_hashes_in_path(content_path)
    _remove_file_or_folder(content_path)
    new_destination = random_tmp_dir_generator(is_file=content_path.is_file())
    await data_manager.pull(
        user_id=user_id,
        project_id=project_id,
        node_uuid=node_uuid,
        file_or_folder=content_path,
        save_to=new_destination,
    )
    downloaded_hashes = _get_file_hashes_in_path(new_destination)
    assert uploaded_hashes == downloaded_hashes
| 1.992188 | 2 |
bibchex/checks/basic.py | tinloaf/bibchex | 5 | 12758476 | <reponame>tinloaf/bibchex
import aiohttp
import re
import logging
from bibchex.config import Config
LOGGER = logging.getLogger(__name__)
class DOIChecker(object):
    """Flags entries that lack a DOI (unless the entry opts out via 'nodoi')."""

    NAME = "doi"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return a list of (check-name, message, details) problem tuples."""
        if entry.options.get('nodoi', False):
            # Entry explicitly declares it has no DOI; nothing to report.
            return []
        if entry.data.get('doi'):
            return []
        # No DOI present: surface any candidate(s) gathered elsewhere.
        details = ""
        best_guess = entry.get_doi()
        if best_guess:
            details = "Suggested DOI: {}".format(best_guess)
        elif entry.get_suggested_dois():
            details = "Suggested DOIs: {}".format(
                entry.get_suggested_dois())
        return [(type(self).NAME, "Missing DOI", details)]
class DOIURLChecker(object):
    """Warns when an entry's URL merely points at the doi.org resolver."""

    NAME = "doi_url"
    # Matches http(s)://doi.org/... and the legacy dx.doi.org host.
    DOI_RE = re.compile(r'https?://(dx\.)?doi.org/.*')

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return [(check-name, message, details)] if the URL is a doi.org link."""
        url = entry.data.get('url')
        if not url:
            return []
        if DOIURLChecker.DOI_RE.match(url):
            return [(type(self).NAME, "URL points to doi.org", "")]
        return []
class DeadURLChecker(object):
    """Checks that an entry's URL is reachable (HTTP 2xx/3xx)."""

    NAME = "dead_url"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """GET the entry's URL and report inaccessible or unreachable hosts."""
        url = entry.data.get('url')
        problems = []
        if not url:
            return []
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    status = resp.status
                    # Anything outside 200-399 is treated as inaccessible.
                    if status >= 400 or status < 200:
                        problems.append((type(self).NAME, "URL seems inaccessible",
                                         "Accessing URL '{}' gives status code {}"
                                         .format(url, status)))
        except aiohttp.client_exceptions.ClientConnectorError:
            problems.append((type(self).NAME, "Could not connect to host",
                             f"Could not connect to the host for URL {url}."))
        except AssertionError:
            # For some reasons, aiohttp sometimes fails with an assertion
            # instead of a ClientConnectorError.
            # NOTE(review): logging.warn is a deprecated alias of warning.
            LOGGER.warn(f"Connecting to {url} triggers assertion")
            problems.append((type(self).NAME, "Could not connect to host",
                             f"Could not connect to the host for URL {url}."))
        return problems
class RequiredFieldsChecker(object):
    """Verifies that every configured required field is present on the entry."""

    NAME = "required_fields"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return one (check-name, message, details) tuple per missing field."""
        problems = []
        for raw_name in self._cfg.get('required', entry):
            name = raw_name.lower()
            # 'author' and 'editor' live on dedicated attributes rather than
            # in entry.data, so they get special treatment.
            if name == 'author':
                if len(entry.authors) == 0:
                    problems.append(
                        (type(self).NAME,
                         "Required field 'author' missing", ""))
            elif name == 'editor':
                if len(entry.editors) == 0:
                    problems.append(
                        (type(self).NAME,
                         "Required field 'editor' missing", ""))
            elif name not in entry.data:
                problems.append(
                    (type(self).NAME,
                     "Required field '{}' missing".format(name), ""))
        return problems
class ForbiddenFieldsChecker(object):
    """Verifies that none of the configured forbidden fields appear on the entry."""

    NAME = "forbidden_fields"

    def __init__(self):
        self._cfg = Config()

    async def check(self, entry):
        """Return one (check-name, message, details) tuple per forbidden
        field that is present.

        'author' and 'editor' are checked via their dedicated attributes;
        every other field is looked up in entry.data.
        """
        problems = []
        forbidden_fields = self._cfg.get('forbidden', entry, [])
        for field_raw in forbidden_fields:
            field = field_raw.lower()
            if field == 'author':
                # Special handling
                if len(entry.authors) > 0:
                    problems.append(
                        (type(self).NAME,
                         "Forbidden field 'author' present", ""))
            # BUG FIX: this branch was a bare `if`, so for field == 'author'
            # the trailing `else` also ran and re-checked entry.data,
            # emitting a duplicate report.
            elif field == 'editor':
                # Special handling
                if len(entry.editors) > 0:
                    problems.append(
                        (type(self).NAME,
                         "Forbidden field 'editor' present", ""))
            else:
                if field in entry.data:
                    problems.append(
                        (type(self).NAME,
                         "Forbidden field '{}' present".format(field), ""))
        return problems
| 2.359375 | 2 |
proof_constructor/test_prettyprint.py | eileenwang1/Python-Prolog-Proof-Constuctor | 2 | 12758477 | <filename>proof_constructor/test_prettyprint.py
import sys
import os
from prologpy.solver import Solver
def test_prettyprint(filename):
    """Parse a Prolog source file and pretty-print its rules and solutions.

    File format: rule clauses, then a single blank line, then the goal
    text. Everything before the first blank line feeds the solver's rule
    database; the blank line itself and everything after it form the goal.
    """
    rules_text = ""
    goal_text = ""
    is_goal = False
    # BUG FIX: `with` guarantees the handle is closed even if parsing
    # raises (the original open()/readline()/close() pair leaked on error).
    with open(filename, "r") as f:
        for line in f:
            if line == "\n":
                is_goal = True
            if is_goal:
                goal_text += line
            else:
                rules_text += line

    solver = Solver(rules_text)
    rules = solver.database.rules
    to_print = ["{}".format(i) for i in rules]
    print("<rules rules={}>".format(to_print))
    print("</rules>")
    # Solutions are computed for their printed side effects.
    solution = solver.find_solutions(goal_text)
test_prettyprint(sys.argv[1]) | 3.40625 | 3 |
stegnography.py | Pineapple-1/open-cv | 1 | 12758478 | import cv2 as cv
import numpy as np
# --- Encoding: hide a 6-bit version of `cameraman` inside `saturn` ---
cameraman = cv.imread('./Photos/cameraman.tif')
saturn = cv.imread('./Photos/saturn.png')
# Match the carrier's size to the payload so pixels line up one-to-one.
# NOTE(review): cv.resize takes (width, height) while shape is (rows, cols);
# fine for square images -- confirm otherwise.
saturn = cv.resize(saturn, (cameraman.shape[0], cameraman.shape[1]), interpolation=cv.INTER_AREA)

cameraman = cv.cvtColor(cameraman, cv.COLOR_BGR2GRAY)
# Split the carrier into its colour channels.
b, g, r = cv.split(saturn)

# Clear the two least-significant bits of each carrier channel; visually
# imperceptible, but it frees 2 bits per channel (6 bits total).
r = r >> 2
r = r << 2
g = g >> 2
g = g << 2
b = b >> 2
b = b << 2

# Slice the payload's top six bits into three 2-bit chunks:
# cr = bits 7-6, cg = bits 5-4, cb = bits 3-2.
cr = cameraman >> 6
cg = cameraman << 2
cg = cg >> 6
cb = cameraman << 4
cb = cb >> 6

# OR each 2-bit chunk into the cleared low bits of a carrier channel.
r = cv.bitwise_or(r, cr)
g = cv.bitwise_or(g, cg)
b = cv.bitwise_or(b, cb)
merged = cv.merge([b, g, r])

# --- Decoding: reassemble the payload's six bits from the stego image ---
b, g, r = cv.split(merged)
redpart = r << 6            # payload bits 7-6 back in place
greenpart = g << 6
greenpart = greenpart >> 2  # payload bits 5-4 back in place
bluepart = b << 6
# BUG FIX: was `bluepart = b >> 4`, which discarded the shifted value and
# read the wrong bits; mirroring the green channel, (b << 6) >> 4 restores
# payload bits 3-2.
bluepart = bluepart >> 4

# Recombine the three bit-planes with bitwise OR (cv.merge would build a
# 3-channel colour image instead of one grayscale plane).
image = bluepart | greenpart | redpart

cv.imshow('saturn', merged)
cv.imshow('hiddenimage', image)
cv.waitKey(0)
| 3.0625 | 3 |
Yank/reports/notebook.py | kmboehm/yank | 0 | 12758479 | """
YANK Health Report Notebook formatter
This module handles all the figure formatting and processing to minimize the code shown in the Health Report Jupyter
Notebook. All data processing and analysis is handled by the main multistate.analyzers package,
mainly image formatting is passed here.
"""
import os
import yaml
import numpy as np
from scipy import interpolate
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import gridspec
from pymbar import MBAR
import seaborn as sns
from simtk import unit as units
from .. import analyze
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
class HealthReportData(analyze.ExperimentAnalyzer):
    """
    Class which houses the data used for the notebook and the generation of all plots including formatting
    """
    def general_simulation_data(self):
        """
        General purpose simulation data on number of iterations, number of states, and number of atoms.
        This just prints out this data in a regular, formatted pattern.
        """
        general = self.get_general_simulation_data()
        # Collect per-phase counters keyed by phase name.
        iterations = {}
        nreplicas = {}
        nstates = {}
        natoms = {}
        for phase_name in self.phase_names:
            iterations[phase_name] = general[phase_name]['iterations']
            nreplicas[phase_name] = general[phase_name]['nreplicas']
            nstates[phase_name] = general[phase_name]['nstates']
            natoms[phase_name] = general[phase_name]['natoms']
        # Column widths: widest of header and every value, plus 2 padding.
        leniter = max(len('Iterations'), *[len(str(i)) for i in iterations.values()]) + 2
        lenreplica = max(len('Replicas'), *[len(str(i)) for i in nreplicas.values()]) + 2
        lenstates = max(len('States'), *[len(str(i)) for i in nstates.values()]) + 2
        lennatoms = max(len('Num Atoms'), *[len(str(i)) for i in natoms.values()]) + 2
        lenleftcol = max(len('Phase'), *[len(phase) for phase in self.phase_names]) + 2
        lines = []
        headstring = ''
        # Header row: each cell centered ('{:^N}') in its computed width.
        headstring += ('{:^' + '{}'.format(lenleftcol) + '}').format('Phase') + '|'
        headstring += ('{:^' + '{}'.format(leniter) + '}').format('Iterations') + '|'
        headstring += ('{:^' + '{}'.format(lenreplica) + '}').format('Replicas') + '|'
        headstring += ('{:^' + '{}'.format(lenstates) + '}').format('States') + '|'
        headstring += ('{:^' + '{}'.format(lennatoms) + '}').format('Num Atoms')
        lines.append(headstring)
        lenline = len(headstring)
        # Heavy rule under the header, light rule after every data row.
        topdiv = '=' * lenline
        lines.append(topdiv)
        for phase in self.phase_names:
            phasestring = ''
            phasestring += ('{:^' + '{}'.format(lenleftcol) + '}').format(phase) + '|'
            phasestring += ('{:^' + '{}'.format(leniter) + '}').format(iterations[phase]) + '|'
            phasestring += ('{:^' + '{}'.format(lenreplica) + '}').format(nreplicas[phase]) + '|'
            phasestring += ('{:^' + '{}'.format(lenstates) + '}').format(nstates[phase]) + '|'
            phasestring += ('{:^' + '{}'.format(lennatoms) + '}').format(natoms[phase])
            lines.append(phasestring)
            lines.append('-' * lenline)
        for line in lines:
            print(line)
    def generate_equilibration_plots(self, discard_from_start=1):
        """
        Create the equilibration scatter plots showing the trend lines, correlation time,
        and number of effective samples

        Parameters
        ----------
        discard_from_start : int, Optional
            Number of initial samples to discard before the equilibration
            analysis. Default: 1.

        Returns
        -------
        equilibration_figure : matplotlib.figure
            Figure showing the equilibration between both phases
        """
        serial_data = self.get_equilibration_data(discard_from_start=discard_from_start)
        # Adjust figure size
        plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
        plot_grid = gridspec.GridSpec(self.nphases, 1) # Vertical distribution
        equilibration_figure = plt.figure()
        # Add some space between the figures
        equilibration_figure.subplots_adjust(hspace=0.4)
        for i, phase_name in enumerate(self.phase_names):
            phase_data = serial_data[phase_name]
            # Three stacked panels per phase: energy trace, decorrelation time,
            # and number of effective samples.
            sub_grid = gridspec.GridSpecFromSubplotSpec(3, 1, subplot_spec=plot_grid[i])
            # FIRST SUBPLOT: energy scatter
            # Attach subplot to figure
            p = equilibration_figure.add_subplot(sub_grid[0])
            # Data assignment for plot generation
            y = self.u_ns[phase_name]
            N = y.size
            x = np.arange(N)
            # Scatter plot
            p.plot(x, y, 'k.')
            # Smoothed equilibrium, this is very crude but it works for large data
            tck = interpolate.splrep(x, y, k=5, s=N * 1E7)
            smoothed = interpolate.splev(x, tck, der=0)
            p.plot(x, smoothed, '-r', linewidth=4)
            # Nequil line
            ylim = p.get_ylim()
            p.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
            p.set_ylim(*ylim) # Reset limits in case vlines expanded them
            p.set_xlim([0, N])
            # Set text
            p.set_title(phase_name + " phase", fontsize=20)
            p.set_ylabel(r'$\Sigma_n u_n$ in kT', fontsize=20)
            # Extra info in text boxes
            # NOTE(review): "Decorelated" is a typo in the displayed label; left
            # as-is here because changing it alters user-visible text.
            subsample_string = 'Subsample Rate: {0:.2f}\nDecorelated Samples: {1:d}'.format(self.g_ts[phase_name], int(
                np.floor(self.Neff_maxs[phase_name])))
            # Pick the quadrant for the info box that is away from the
            # equilibration line (x) and the smoothed curve (y).
            if np.mean([0, N]) > self.nequils[phase_name]:
                txt_horz = 'right'
                txt_xcoord = 0.95
            else:
                txt_horz = 'left'
                txt_xcoord = 0.05
            smooth_index = {'right': -1, 'left': 0} # condition y
            if np.mean(ylim) > smoothed[smooth_index[txt_horz]]:
                txt_vert = 'top'
                txt_ycoord = 0.95
            else:
                txt_vert = 'bottom'
                txt_ycoord = 0.05
            p.text(txt_xcoord, txt_ycoord,
                   subsample_string,
                   verticalalignment=txt_vert, horizontalalignment=txt_horz,
                   transform=p.transAxes,
                   fontsize=15,
                   bbox={'alpha': 1.0, 'facecolor': 'white'}
                   )
            # SECOND SUBPLOT: g_t trace
            i_t = phase_data['iterations_considered']
            g_i = phase_data['subsample_rate_by_iterations_considered']
            n_effective_i = phase_data['effective_samples_by_iterations_considered']
            x = i_t
            g = equilibration_figure.add_subplot(sub_grid[1])
            g.plot(x, g_i)
            ylim = g.get_ylim()
            g.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
            g.set_ylim(*ylim) # Reset limits in case vlines expanded them
            g.set_xlim([0, N])
            g.set_ylabel(r'Decor. Time', fontsize=20)
            # THIRD SUBPLOT: Neff trace
            ne = equilibration_figure.add_subplot(sub_grid[2])
            ne.plot(x, n_effective_i)
            ylim = ne.get_ylim()
            ne.vlines(self.nequils[phase_name], *ylim, colors='b', linewidth=4)
            ne.set_ylim(*ylim) # Reset limits in case vlines expanded them
            ne.set_xlim([0, N])
            ne.set_ylabel(r'Neff samples', fontsize=20)
            ne.set_xlabel(r'Iteration', fontsize=20)
        return equilibration_figure
def compute_rmsds(self):
return NotImplementedError("This function is still a prototype and has segfault issues, please disable for now")
# """Compute the RMSD of the ligand and the receptor by state"""
# if not self._equilibration_run:
# raise RuntimeError("Cannot run RMSD without first running the equilibration. Please run the "
# "corresponding function/cell first!")
# plt.rcParams['figure.figsize'] = 20, 6 * self.nphases * 2
# rmsd_figure, subplots = plt.subplots(2, 1)
# for i, phase_name in enumerate(self.phase_names):
# if phase_name not in self._serialized_data:
# self._serialized_data[phase_name] = {}
# self._serialized_data[phase_name]['rmsd'] = {}
# serial = self._serialized_data[phase_name]['rmsd']
# analyzer = self.analyzers[phase_name]
# reporter = analyzer.reporter
# metadata = reporter.read_dict('metadata')
# topography = mmtools.utils.deserialize(metadata['topography'])
# topology = topography.topology
# test_positions = reporter.read_sampler_states(0, analysis_particles_only=True)[0]
# atoms_analysis = test_positions.positions.shape[0]
# topology = topology.subset(range(atoms_analysis))
# iterations = self.iterations[phase_name]
# positions = np.zeros([iterations, atoms_analysis, 3])
# for j in range(iterations):
# sampler_states = reporter.read_sampler_states(j, analysis_particles_only=True)
# # Deconvolute
# thermo_states = reporter.read_replica_thermodynamic_states(iteration=j)
# sampler = sampler_states[thermo_states[0]]
# positions[j, :, :] = sampler.positions
# trajectory = md.Trajectory(positions, topology)
# rmsd_ligand = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.ligand_atoms)
# rmsd_recpetor = md.rmsd(trajectory, trajectory, frame=0, atom_indices=topography.receptor_atoms)
# serial['ligand'] = rmsd_ligand.tolist()
# serial['receptor'] = rmsd_recpetor.tolist()
# p = subplots[i]
# x = range(iterations)
# p.set_title(phase_name + " phase", fontsize=20)
# p.plot(x, rmsd_ligand, label='Ligand RMSD')
# p.plot(x, rmsd_recpetor, label='Receptor RMSD')
# p.legend()
# p.set_xlim([0, iterations])
# ylim = p.get_ylim()
# p.set_ylim([0, ylim[-1]])
# p.set_ylabel(r'RMSD (nm)', fontsize=20)
# p.set_xlabel(r'Iteration', fontsize=20)
# return rmsd_figure
def generate_decorrelation_plots(self, decorrelation_threshold=0.1):
"""
Parameters
----------
decorrelation_threshold : float, Optional
When number of decorrelated samples is less than this percent of the total number of samples, raise a
warning. Default: `0.1`.
Returns
-------
decorrelation_figure : matplotlib.figure
Figure showing the decorrelation pie chart data of how the samples are distributed between equilibration,
correlation, and decorrelation.
"""
if not self._general_run or not self._equilibration_run:
raise RuntimeError("Cannot generate decorrelation data without general simulation data and equilibration "
"data first! Please run the corresponding functions/cells.")
# This will exist because of _equilibration_run
eq_data = self.get_equilibration_data(discard_from_start=self._n_discarded)
# Readjust figure output
plt.rcParams['figure.figsize'] = 20, 8
decorrelation_figure = plt.figure()
decorrelation_figure.subplots_adjust(wspace=0.2)
plotkeys = [100 + (10 * self.nphases) + (i + 1) for i in range(self.nphases)] # Horizontal distribution
for phase_name, plotid in zip(self.phase_names, plotkeys):
serial = eq_data[phase_name]
# Create subplot
p = decorrelation_figure.add_subplot(plotid)
labels = ['Decorrelated', 'Correlated', 'Equilibration']
colors = ['#2c7bb6', '#abd0e0', '#fdae61'] # blue, light blue, and orange
explode = [0, 0, 0.0]
n_iter = self.iterations[phase_name]
decor = serial['count_decorrelated_samples']
eq = serial['count_total_equilibration_samples']
cor = serial['count_correlated_samples']
dat = np.array([decor, cor, eq]) / float(n_iter)
if dat[0] <= decorrelation_threshold:
colors[0] = '#d7191c' # Red for warning
patch, txt, autotxt = p.pie(
dat,
explode=explode,
labels=labels,
colors=colors,
autopct='%1.1f%%',
shadow=True,
startangle=90 + 360 * dat[0] / 2, # put center of decor at top
counterclock=False,
textprops={'fontsize': 14}
)
for tx in txt: # This is the only way I have found to adjust the label font size
tx.set_fontsize(18)
p.axis('equal')
p.set_title(phase_name + " phase", fontsize=20, y=1.05)
# Generate warning if need be
if dat[0] <= decorrelation_threshold:
p.text(
0.5, -0.1,
"Warning! Fewer than {0:.1f}% samples are\nequilibrated and decorelated!".format(
decorrelation_threshold * 100),
verticalalignment='bottom', horizontalalignment='center',
transform=p.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return decorrelation_figure
def generate_mixing_plot(self, mixing_cutoff=0.05, mixing_warning_threshold=0.90, cmap_override=None):
"""
Generate the state diffusion mixing map as an image instead of array of number
Parameters
----------
mixing_cutoff : float
Minimal level of mixing percent from state `i` to `j` that will be plotted.
Domain: [0,1]
Default: 0.05.
mixing_warning_threshold : float
Level of mixing where transition from state `i` to `j` generates a warning based on percent of total swaps.
Domain (mixing_cutoff, 1)
Default: `0.90`.
cmap_override : None or string
Override the custom colormap that is used for this figure in case the figure is too white or you wnat to
do something besides the custom one here.
Returns
-------
mixing_figure : matplotlib.figure
Figure showing the state mixing as a color diffusion map instead of grid of numbers
"""
mixing_serial = self.get_mixing_data()
# Set up image
mixing_figure, subplots = plt.subplots(1, 2)
# Create custom cmap goes from white to pure blue, goes red if the threshold is reached
if mixing_cutoff is None:
mixing_cutoff = 0
if mixing_warning_threshold <= mixing_cutoff:
raise ValueError("mixing_warning_threshold must be larger than mixing_cutoff")
if (mixing_warning_threshold > 1 or mixing_cutoff > 1 or
mixing_warning_threshold < 0 or mixing_cutoff < 0):
raise ValueError("mixing_warning_threshold and mixing_cutoff must be between [0,1]")
cdict = {'red': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(mixing_warning_threshold, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 1.0, 1.0),
(mixing_cutoff, 1.0, 1.0),
(mixing_warning_threshold, 1.0, 1.0),
(mixing_warning_threshold, 0.0, 0.0),
(1.0, 0.0, 0.0))}
if cmap_override is not None:
# Use this cmap instead if your results are too diffuse to see over the white
cmap = plt.get_cmap("Blues")
else:
cmap = LinearSegmentedColormap('BlueWarnRed', cdict)
# Plot a diffusing mixing map for each phase.
for phase_name, subplot in zip(self.phase_names, subplots):
serial = mixing_serial[phase_name]
transition_matrix = serial['transitions']
eigenvalues = serial['eigenvalues']
statistical_inefficiency = serial['stat_inefficiency']
# Without vmin/vmax, the image normalizes the values to mixing_data.max
# which screws up the warning colormap.
# Can also use norm=NoNorm(), but that makes the colorbar manipulation fail.
output_image = subplot.imshow(transition_matrix, aspect='equal',
cmap=cmap, vmin=0, vmax=1)
# Add colorbar.
decimal = 2 # Precision setting
nticks = 11
# The color bar has to be configured independently of the source image
# or it cant be truncated to only show the data. i.e. it would instead
# go 0-1 always.
ubound = np.min([np.around(transition_matrix.max(), decimals=decimal) + 10 ** (-decimal), 1])
lbound = np.max([np.around(transition_matrix.min(), decimals=decimal) - 10 ** (-decimal), 0])
boundslice = np.linspace(lbound, ubound, 256)
cbar = plt.colorbar(output_image, ax=subplot, orientation='vertical',
boundaries=boundslice,
values=boundslice[1:],
format='%.{}f'.format(decimal))
# Update ticks.
ticks = np.linspace(lbound, ubound, nticks)
cbar.set_ticks(ticks)
# Title: Perron eigenvalue, equilibration time and statistical inefficiency.
perron_eigenvalue = eigenvalues[1]
title_txt = (phase_name + ' phase\n'
'Perron eigenvalue: {}\n'
'State equilibration timescale: ~{} iterations\n')
if perron_eigenvalue >= 1:
title_txt = title_txt.format('1.0', '$\infty$')
else:
equilibration_timescale = 1.0 / (1.0 - perron_eigenvalue)
title_txt = title_txt.format('{:.5f}', '{:.1f}')
title_txt = title_txt.format(perron_eigenvalue, equilibration_timescale)
title_txt += 'Replica state index statistical inefficiency: {:.3f}'.format(statistical_inefficiency)
subplot.set_title(title_txt, fontsize=20, y=1.05)
# Display Warning.
if np.any(transition_matrix >= mixing_warning_threshold):
subplot.text(
0.5, -0.2,
("Warning!\nThere were states that less than {0:.2f}% swaps!\n"
"Consider adding more states!".format((1 - mixing_warning_threshold) * 100)),
verticalalignment='bottom', horizontalalignment='center',
transform=subplot.transAxes,
fontsize=20,
color='red',
bbox={'alpha': 1.0, 'facecolor': 'white', 'lw': 0, 'pad': 0}
)
return mixing_figure
    def generate_replica_mixing_plot(self, phase_stacked_replica_plots=False):
        """
        Generate the replica trajectory mixing plots. Show the state of each replica as a function of simulation time

        Parameters
        ----------
        phase_stacked_replica_plots : boolean, Default: False
            Determine if the phases should be shown side by side, or one on top of the other. If True, the two phases
            will be shown with phase 1 on top and phase 2 on bottom.

        Returns
        -------
        replica_figure : matplotlib.figure
            Figure showing the replica state trajectories for both phases
        """
        # Determine max number of states
        max_n_replicas = 0
        for i, phase_name in enumerate(self.phase_names):
            # Gather state NK
            analyzer = self.analyzers[phase_name]
            n_replicas = analyzer.reporter.n_replicas
            max_n_replicas = max(n_replicas, max_n_replicas)
        # Create Parent Gridspec
        # Figure height scales with the largest replica count so each per-replica
        # strip stays readable.
        if phase_stacked_replica_plots:
            plot_grid = gridspec.GridSpec(2, 1)
            plt.rcParams['figure.figsize'] = 20, max_n_replicas * 6
        else:
            plot_grid = gridspec.GridSpec(1, 2)
            plt.rcParams['figure.figsize'] = 20, max_n_replicas * 3
        replica_figure = plt.figure()
        for i, phase_name in enumerate(self.phase_names):
            # Gather state NK
            analyzer = self.analyzers[phase_name]
            sampled_energies, _, _, state_kn = analyzer.read_energies()
            n_replicas, n_states, n_iterations = sampled_energies.shape
            # Create subgrid
            sub_grid = gridspec.GridSpecFromSubplotSpec(n_replicas, 1, subplot_spec=plot_grid[i])
            # Loop through all states
            for replica_index in range(n_replicas):
                # Add plot
                plot = replica_figure.add_subplot(sub_grid[replica_index])
                # Actually plot
                plot.plot(state_kn[replica_index, :], 'k.')
                # Format plot
                plot.set_yticks([])
                plot.set_xlim([0, n_iterations])
                plot.set_ylim([0, n_states])
                # Only the bottom strip keeps its x-axis tick labels.
                if replica_index < n_replicas - 1:
                    plot.set_xticks([])
                plot.set_ylabel('{}'.format(replica_index))
                if replica_index == 0: # Title
                    plot.set_title('{} phase'.format(phase_name), fontsize=20)
        # Record that this analysis step has been generated.
        self._replica_mixing_run = True
        return replica_figure
def generate_free_energy(self):
fe_data = self.get_experiment_free_energy_data()
delta_f = fe_data['free_energy_diff']
delta_h = fe_data['enthalpy_diff']
delta_f_err = fe_data['free_energy_diff_error']
delta_h_err = fe_data['enthalpy_diff_error']
delta_f_unit = fe_data['free_energy_diff_unit']
delta_h_unit = fe_data['enthalpy_diff_unit']
delta_f_err_unit = fe_data['free_energy_diff_error_unit']
delta_h_err_unit = fe_data['enthalpy_diff_error_unit']
# Attempt to guess type of calculation
calculation_type = ''
for phase in self.phase_names:
if 'complex' in phase:
calculation_type = ' of binding'
elif 'solvent1' in phase:
calculation_type = ' of solvation'
print('Free energy{:<13}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_f, delta_f_err, delta_f_unit / units.kilocalories_per_mole,
delta_f_err_unit / units.kilocalories_per_mole))
for phase in self.phase_names:
delta_f_phase = fe_data[phase]['free_energy_diff']
delta_f_err_phase = fe_data[phase]['free_energy_diff_error']
detla_f_ssc_phase = fe_data[phase]['free_energy_diff_standard_state_correction']
print('DeltaG {:<17}: {:9.3f} +- {:.3f} kT'.format(phase, delta_f_phase,
delta_f_err_phase))
if detla_f_ssc_phase != 0.0:
print('DeltaG {:<17}: {:18.3f} kT'.format('standard state correction', detla_f_ssc_phase))
print('')
print('Enthalpy{:<16}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
calculation_type, delta_h, delta_h_err, delta_h_unit / units.kilocalories_per_mole,
delta_h_err_unit / units.kilocalories_per_mole)
)
    def free_energy_trace(self, discard_from_start=1, n_trace=10):
        """
        Trace the free energy by keeping fewer and fewer samples in both forward and reverse direction

        Parameters
        ----------
        discard_from_start : int, Optional
            Number of initial samples to discard before tracing (sample 0 is
            the minimized structure, not an equilibrium sample). Default: 1.
        n_trace : int, Optional
            Number of evenly-spaced fractions of the trajectory to analyze.
            Default: 10.

        Returns
        -------
        free_energy_trace_figure : matplotlib.figure
            Figure showing the equilibration between both phases
        """
        trace_spacing = 1.0/n_trace
        def format_trace_plot(plot: plt.Axes, trace_forward: np.ndarray, trace_reverse: np.ndarray):
            # Draw forward/reverse traces with 2-sigma error bars and shade the
            # final forward estimate's confidence band across the full x range.
            x = np.arange(n_trace + 1)[1:] * trace_spacing * 100
            plot.errorbar(x, trace_forward[:, 0], yerr=2 * trace_forward[:, 1], ecolor='b',
                          elinewidth=0, mec='none', mew=0, linestyle='None',
                          zorder=10)
            plot.plot(x, trace_forward[:, 0], 'b-', marker='o', mec='b', mfc='w', label='Forward', zorder=20,)
            plot.errorbar(x, trace_reverse[:, 0], yerr=2 * trace_reverse[:, 1], ecolor='r',
                          elinewidth=0, mec='none', mew=0, linestyle='None',
                          zorder=10)
            plot.plot(x, trace_reverse[:, 0], 'r-', marker='o', mec='r', mfc='w', label='Reverse', zorder=20)
            y_fill_upper = [trace_forward[-1, 0] + 2 * trace_forward[-1, 1]] * 2
            y_fill_lower = [trace_forward[-1, 0] - 2 * trace_forward[-1, 1]] * 2
            xlim = [0, 100]
            plot.fill_between(xlim, y_fill_lower, y_fill_upper, color='orchid', zorder=5)
            plot.set_xlim(xlim)
            plot.legend()
            plot.set_xlabel("% Samples Analyzed", fontsize=20)
            plot.set_ylabel(r"$\Delta G$ in kcal/mol", fontsize=20)
        # Adjust figure size
        plt.rcParams['figure.figsize'] = 15, 6 * (self.nphases + 1) * 2
        plot_grid = gridspec.GridSpec(self.nphases + 1, 1) # Vertical distribution
        free_energy_trace_figure = plt.figure()
        # Add some space between the figures
        free_energy_trace_figure.subplots_adjust(hspace=0.4)
        traces = {}
        for i, phase_name in enumerate(self.phase_names):
            traces[phase_name] = {}
            # Make sure the serialization slot for this phase/section exists.
            if phase_name not in self._serialized_data:
                self._serialized_data[phase_name] = {}
            serial = self._serialized_data[phase_name]
            if "free_energy" not in serial:
                serial["free_energy"] = {}
            serial = serial["free_energy"]
            # Columns: [free energy, error], one row per trace fraction.
            free_energy_trace_f = np.zeros([n_trace, 2], dtype=float)
            free_energy_trace_r = np.zeros([n_trace, 2], dtype=float)
            p = free_energy_trace_figure.add_subplot(plot_grid[i])
            analyzer = self.analyzers[phase_name]
            kcal = analyzer.kT / units.kilocalorie_per_mole
            # Data crunching to get timeseries
            sampled_energies, _, _, states = analyzer.read_energies()
            n_replica, n_states, _ = sampled_energies.shape
            # Sample at index 0 is actually the minimized structure and NOT from the equilibrium distribution
            # This throws off all of the equilibrium data
            sampled_energies = sampled_energies[:, :, discard_from_start:]
            states = states[:, discard_from_start:]
            total_iterations = sampled_energies.shape[-1]
            for trace_factor in range(n_trace, 0, -1):  # Reverse order tracing
                trace_percent = trace_spacing*trace_factor
                j = trace_factor - 1  # Indexing
                kept_iterations = int(np.ceil(trace_percent*total_iterations))
                # Forward keeps the first kept_iterations samples; reverse
                # keeps the last kept_iterations samples (in reversed order).
                u_forward = sampled_energies[:, :, :kept_iterations]
                s_forward = states[:, :kept_iterations]
                u_reverse = sampled_energies[:, :, -1:-kept_iterations-1:-1]
                s_reverse = states[:, -1:-kept_iterations - 1:-1]
                for energy_sub, state_sub, storage in [
                        (u_forward, s_forward, free_energy_trace_f), (u_reverse, s_reverse, free_energy_trace_r)]:
                    # Re-run the equilibration detection on each truncated series.
                    u_n = analyzer.get_effective_energy_timeseries(energies=energy_sub,
                                                                   replica_state_indices=state_sub)
                    i_t, g_i, n_effective_i = analyze.multistate.get_equilibration_data_per_sample(u_n)
                    i_max = n_effective_i.argmax()
                    number_equilibrated = i_t[i_max]
                    g_t = g_i[i_max]
                    if not self.use_full_trajectory:
                        energy_sub = analyze.multistate.utils.remove_unequilibrated_data(energy_sub,
                                                                                        number_equilibrated,
                                                                                        -1)
                        state_sub = analyze.multistate.utils.remove_unequilibrated_data(state_sub,
                                                                                       number_equilibrated, -1)
                        energy_sub = analyze.multistate.utils.subsample_data_along_axis(energy_sub, g_t, -1)
                        state_sub = analyze.multistate.utils.subsample_data_along_axis(state_sub, g_t, -1)
                    samples_per_state = np.zeros([n_states], dtype=int)
                    unique_sampled_states, counts = np.unique(state_sub, return_counts=True)
                    # Assign those counts to the correct range of states
                    samples_per_state[unique_sampled_states] = counts
                    mbar = MBAR(energy_sub, samples_per_state)
                    fe_data = mbar.getFreeEnergyDifferences(compute_uncertainty=True)
                    # Trap theta_ij output
                    try:
                        fe, dfe, _ = fe_data
                    except ValueError:
                        fe, dfe = fe_data
                    ref_i, ref_j = analyzer.reference_states
                    storage[j, :] = fe[ref_i, ref_j] * kcal, dfe[ref_i, ref_j] * kcal
            format_trace_plot(p, free_energy_trace_f, free_energy_trace_r)
            p.set_title("{} Phase".format(phase_name.title()), fontsize=20)
            traces[phase_name]['f'] = free_energy_trace_f
            traces[phase_name]['r'] = free_energy_trace_r
            serial['forward'] = free_energy_trace_f.tolist()
            serial['reverse'] = free_energy_trace_r.tolist()
        # Finally handle last combined plot
        # Sum the phase free energies; errors combine in quadrature.
        combined_trace_f = np.zeros([n_trace, 2], dtype=float)
        combined_trace_r = np.zeros([n_trace, 2], dtype=float)
        for phase_name in self.phase_names:
            phase_f = traces[phase_name]['f']
            phase_r = traces[phase_name]['r']
            combined_trace_f[:, 0] += phase_f[:, 0]
            combined_trace_f[:, 1] = np.sqrt(combined_trace_f[:, 1]**2 + phase_f[:, 1]**2)
            combined_trace_r[:, 0] += phase_r[:, 0]
            combined_trace_r[:, 1] = np.sqrt(combined_trace_r[:, 1] ** 2 + phase_r[:, 1] ** 2)
        p = free_energy_trace_figure.add_subplot(plot_grid[-1])
        format_trace_plot(p, combined_trace_f, combined_trace_r)
        p.set_title("Combined Phases", fontsize=20)
        return free_energy_trace_figure
    def restraint_distributions_plot(self):
        """Plot histograms of the restraint energies (and distances, when
        available) at the bound (lambda=1) and non-interacting (lambda=0) end
        states, overlaying the cutoffs used for restraint unbiasing.

        Prints a message and returns None if no restraint unbiasing step was
        performed for this calculation; otherwise returns the figure.
        """
        # Indices into the (energies, distances) pairs returned by the analyzer.
        ENERGIES_IDX = 0
        DISTANCES_IDX = 1
        # Find the phase that defines the restraint energies and distances.
        for phase_name in self.phase_names:
            analyzer = self.analyzers[phase_name]
            lambda1_data = list(analyzer._get_restraint_energies_distances_at_state(0))
            if len(lambda1_data[ENERGIES_IDX]) != 0:
                break
        # Check if we have a restraint at all.
        if len(lambda1_data[ENERGIES_IDX]) == 0:
            print('The restraint unbiasing step was not performed for this calculation.')
            return
        # The restraint distances are not computed if there's no distance cutoff.
        lambda0_data = list(analyzer._get_restraint_energies_distances_at_state(-1))
        cutoffs = list(analyzer._get_restraint_cutoffs())
        xlabels = ['Restraint energies [kT]', 'Restraint distances [Angstrom]']
        # Drop the distance entry from every parallel list if no distances were
        # computed; otherwise strip units for display.
        for data in [lambda1_data, lambda0_data, cutoffs, xlabels]:
            if len(lambda1_data[DISTANCES_IDX]) == 0:
                del data[DISTANCES_IDX]
            elif isinstance(data[DISTANCES_IDX], units.Quantity):
                # Convert the distances into the units that will be printed.
                data[DISTANCES_IDX] /= units.angstroms
        # Plot the lambda=1 and lambda=0 restraints data.
        figure, axes = plt.subplots(ncols=len(lambda1_data), figsize=(20, 10))
        # plt.subplots returns a bare Axes (not a list) when ncols == 1.
        if len(lambda1_data) == 1:
            axes = [axes]
        for ax, lambda1, lambda0 in zip(axes, lambda1_data, lambda0_data):
            sns.distplot(lambda1, ax=ax, kde=False, label='bound state')
            sns.distplot(lambda0, ax=ax, kde=False, label='non-interacting state')
        # Plot the cutoffs used for the restraint unbiasing.
        for ax, cutoff in zip(axes, cutoffs):
            limits = ax.get_ylim()
            ax.plot([cutoff for _ in range(100)], np.linspace(limits[0], limits[1]/2, num=100))
        # Labels and legend.
        for i, (ax, xlabel) in enumerate(zip(axes, xlabels)):
            ax.set_xlabel(xlabel)
            if i == 0:
                ax.set_ylabel('Number of samples')
            elif i == 1:
                ax.legend(loc='upper right')
        return figure
def report_version(self):
current_version = self._serialized_data['yank_version']
print("Rendered with YANK Version {}".format(current_version))
def dump_serial_data(self, path):
"""Dump the serialized data to YAML file"""
true_path, ext = os.path.splitext(path)
if not ext: # empty string check
ext = '.yaml'
true_path += ext
with open(true_path, 'w') as f:
f.write(yaml.dump(self._serialized_data))
from django.test import TestCase
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib import cache
from zerver.lib import event_queue
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, do_add_subscription,
get_display_recipient,
)
from zerver.models import (
get_realm,
get_user_profile_by_email,
resolve_email_to_domain,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
)
import base64
import os
import re
import time
import ujson
import urllib
from contextlib import contextmanager
# Module-level cache mapping a user's email address to their API key, so
# repeated lookups across tests avoid hitting the database (see
# AuthedTestCase.get_api_key).
API_KEYS = {}
@contextmanager
def stub(obj, name, f):
    """Temporarily replace attribute *name* of *obj* with *f*.

    The original attribute is restored on exit even if the ``with`` body
    raises (the previous implementation leaked the stub on exceptions,
    poisoning later tests).
    """
    old_f = getattr(obj, name)
    setattr(obj, name, f)
    try:
        yield
    finally:
        setattr(obj, name, old_f)
@contextmanager
def simulated_queue_client(client):
    """Temporarily replace queue_processors.SimpleQueueClient with *client*.

    The real class is restored on exit even if the ``with`` body raises.
    """
    real_SimpleQueueClient = queue_processors.SimpleQueueClient
    queue_processors.SimpleQueueClient = client
    try:
        yield
    finally:
        queue_processors.SimpleQueueClient = real_SimpleQueueClient
@contextmanager
def tornado_redirected_to_list(lst):
    """Capture Tornado event-queue notifications into *lst* instead of
    delivering them.

    The real handler is restored on exit even if the ``with`` body raises.
    """
    real_event_queue_process_notification = event_queue.process_notification
    event_queue.process_notification = lst.append
    try:
        yield
    finally:
        event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
    """Make every cache read miss while recording the attempted lookups.

    Yields the list of ``(operation, key(s), cache_name)`` tuples recorded so
    far.  The real cache functions are restored on exit even if the ``with``
    body raises.
    """
    cache_queries = []
    def my_cache_get(key, cache_name=None):
        cache_queries.append(('get', key, cache_name))
        return None
    def my_cache_get_many(keys, cache_name=None):
        # NOTE(review): the real cache_get_many presumably returns a dict;
        # callers under test must tolerate None here -- confirm before relying
        # on the return value.
        cache_queries.append(('getmany', keys, cache_name))
        return None
    old_get = cache.cache_get
    old_get_many = cache.cache_get_many
    cache.cache_get = my_cache_get
    cache.cache_get_many = my_cache_get_many
    try:
        yield cache_queries
    finally:
        cache.cache_get = old_get
        cache.cache_get_many = old_get_many
@contextmanager
def queries_captured():
    '''
    Allow a user to capture just the queries executed during
    the with statement.

    Yields a list of dicts with the rendered ``sql`` and wall-clock ``time``
    (seconds, formatted as a string) of every query run inside the block.
    The original cursor methods are restored on exit even if the block raises.
    '''
    queries = []
    def wrapper_execute(self, action, sql, params=()):
        # Time the underlying cursor call and record the mogrified SQL.
        start = time.time()
        try:
            return action(sql, params)
        finally:
            stop = time.time()
            duration = stop - start
            queries.append({
                    'sql': self.mogrify(sql, params),
                    'time': "%.3f" % duration,
                })
    old_execute = TimeTrackingCursor.execute
    old_executemany = TimeTrackingCursor.executemany
    def cursor_execute(self, sql, params=()):
        return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
    TimeTrackingCursor.execute = cursor_execute
    def cursor_executemany(self, sql, params=()):
        return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params)
    TimeTrackingCursor.executemany = cursor_executemany
    try:
        yield queries
    finally:
        # Un-monkeypatch even if the ``with`` body raised, so a failing test
        # cannot leave the cursor class instrumented for later tests.
        TimeTrackingCursor.execute = old_execute
        TimeTrackingCursor.executemany = old_executemany
def find_key_by_email(address):
    """Scrape the newest confirmation key emailed to *address* from Django's
    test outbox; returns None if no matching message exists."""
    from django.core.mail import outbox
    key_pattern = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
    # Walk the outbox newest-first so we pick up the most recent key.
    for message in reversed(outbox):
        if address in message.to:
            return key_pattern.search(message.body).group(1)
def message_ids(result):
    """Return the set of message ids contained in a get-messages API result."""
    return {message['id'] for message in result['messages']}
def message_stream_count(user_profile):
    """Count every message received by *user_profile*."""
    return (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .count())
def most_recent_usermessage(user_profile):
    """Return the newest UserMessage row for *user_profile*."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('-message'))
    return rows[0]  # Django does LIMIT here
def most_recent_message(user_profile):
    """Return the newest Message received by *user_profile*."""
    return most_recent_usermessage(user_profile).message
def get_user_messages(user_profile):
    """Return every Message received by *user_profile*, oldest first."""
    rows = (UserMessage.objects
            .select_related("message")
            .filter(user_profile=user_profile)
            .order_by('message'))
    return [row.message for row in rows]
class DummyObject:
    # Bare attribute container used to fake nested objects (e.g. a Tornado
    # connection) in the dummy request classes below.
    pass
class DummyTornadoRequest:
    # Minimal stand-in for a Tornado HTTP request: provides just the
    # connection.stream structure consulted by the code under test.
    def __init__(self):
        self.connection = DummyObject()
        self.connection.stream = DummyStream()
class DummyHandler(object):
    """Minimal stand-in for a Tornado RequestHandler used in tests.

    When ``assert_callback`` is provided, it is invoked with the response
    passed to ``zulip_finish`` so tests can make assertions on it.
    """
    def __init__(self, assert_callback):
        self.assert_callback = assert_callback
        self.request = DummyTornadoRequest()

    # Mocks RequestHandler.async_callback, which wraps a callback to
    # handle exceptions. We return the callback as-is.
    def async_callback(self, cb):
        return cb

    def write(self, response):
        # Bug fix: `raise NotImplemented` raised a confusing TypeError because
        # NotImplemented is not an exception class; raise the proper error.
        raise NotImplementedError()

    def zulip_finish(self, response, *ignore):
        if self.assert_callback:
            self.assert_callback(response)
class DummySession(object):
    # Fake Django session; only the session_key attribute is consulted.
    session_key = "0"
class DummyStream:
    def closed(self):
        # The fake connection is always considered open.
        return False
class POSTRequestMock(object):
    """Fake Django POST request for invoking views directly in tests.

    Wires up the POST data, the authenticated user, a dummy Tornado handler
    (optionally asserting on the response via *assert_callback*), a dummy
    session, and the bookkeeping attributes middleware normally provides.
    """
    method = "POST"

    def __init__(self, post_data, user_profile, assert_callback=None):
        self.REQUEST = self.POST = post_data
        self.user = user_profile
        self._tornado_handler = DummyHandler(assert_callback)
        self.session = DummySession()
        # Fix: the original assigned self._log_data = {} twice; once is enough.
        self._log_data = {}
        self.META = {'PATH_INFO': 'test'}
class AuthedTestCase(TestCase):
# Helper because self.client.patch annoying requires you to urlencode
def client_patch(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.patch(url, info, **kwargs)
def client_put(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.put(url, info, **kwargs)
def client_delete(self, url, info={}, **kwargs):
info = urllib.urlencode(info)
return self.client.delete(url, info, **kwargs)
    def login(self, email, password=None):
        # Log in via the login form; the default password is the deterministic
        # initial password derived from the email (see initial_password).
        if password is None:
            password = initial_password(email)
        return self.client.post('/accounts/login/',
                                {'username':email, 'password':password})
    def register(self, username, password, domain="zulip.com"):
        # Stage one of registration: request the signup email, then complete
        # stage two via submit_reg_form_for_user.
        self.client.post('/accounts/home/',
                         {'email': username + "@" + domain})
        return self.submit_reg_form_for_user(username, password, domain=domain)
    def submit_reg_form_for_user(self, username, password, domain="zulip.com"):
        """
        Stage two of the two-step registration process.

        If things are working correctly the account should be fully
        registered after this call.
        """
        # The confirmation key is scraped from the outgoing registration email
        # (see find_key_by_email).
        return self.client.post('/accounts/register/',
                                {'full_name': username, 'password': password,
                                 'key': find_key_by_email(username + '@' + domain),
                                 'terms': True})
    def get_api_key(self, email):
        # Memoize API keys in the module-level API_KEYS dict so repeated calls
        # avoid a database lookup.
        if email not in API_KEYS:
            API_KEYS[email] = get_user_profile_by_email(email).api_key
        return API_KEYS[email]
    def api_auth(self, email):
        # Build an HTTP Basic auth header of the form "email:api_key" for use
        # as extra kwargs to the test client.
        credentials = "%s:%s" % (email, self.get_api_key(email))
        return {
            'HTTP_AUTHORIZATION': 'Basic ' + base64.b64encode(credentials)
            }
    def get_streams(self, email):
        """
        Helper function to get the stream names for a user
        """
        user_profile = get_user_profile_by_email(email)
        # Only active stream subscriptions count.
        subs = Subscription.objects.filter(
            user_profile = user_profile,
            active = True,
            recipient__type = Recipient.STREAM)
        return [get_display_recipient(sub.recipient) for sub in subs]
    def send_message(self, sender_name, recipient_list, message_type,
                     content="test content", subject="test", **kwargs):
        # Send a message as *sender_name* (an email address); recipient_list
        # may be a single recipient or a list of them.
        sender = get_user_profile_by_email(sender_name)
        if message_type == Recipient.PERSONAL:
            message_type_name = "private"
        else:
            message_type_name = "stream"
        if isinstance(recipient_list, basestring):
            recipient_list = [recipient_list]
        # NOTE(review): "<NAME>" looks like a redacted client name -- confirm
        # the intended sending-client string before relying on it.
        (sending_client, _) = Client.objects.get_or_create(name="<NAME>")
        return check_send_message(
            sender, sending_client, message_type_name, recipient_list, subject,
            content, forged=False, forged_timestamp=None,
            forwarder_user_profile=sender, realm=sender.realm, **kwargs)
    def get_old_messages(self, anchor=1, num_before=100, num_after=100):
        # Fetch message history around `anchor` via the JSON API and return
        # just the decoded list of messages.
        post_params = {"anchor": anchor, "num_before": num_before,
                       "num_after": num_after}
        result = self.client.post("/json/get_old_messages", dict(post_params))
        data = ujson.loads(result.content)
        return data['messages']
    def users_subscribed_to_stream(self, stream_name, realm_domain):
        # Return the UserProfile objects with an active subscription to the
        # named stream in the given realm.
        realm = get_realm(realm_domain)
        stream = Stream.objects.get(name=stream_name, realm=realm)
        recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
        subscriptions = Subscription.objects.filter(recipient=recipient, active=True)
        return [subscription.user_profile for subscription in subscriptions]
    def assert_json_success(self, result):
        """
        Successful POSTs return a 200 and JSON of the form {"result": "success",
        "msg": ""}.

        Returns the decoded JSON payload so callers can make further checks.
        """
        self.assertEqual(result.status_code, 200, result)
        json = ujson.loads(result.content)
        self.assertEqual(json.get("result"), "success")
        # We have a msg key for consistency with errors, but it typically has an
        # empty value.
        self.assertIn("msg", json)
        return json
    def get_json_error(self, result, status_code=400):
        # Assert that `result` is a JSON error response with the expected
        # status code, and return its human-readable message.
        self.assertEqual(result.status_code, status_code)
        json = ujson.loads(result.content)
        self.assertEqual(json.get("result"), "error")
        return json['msg']
    def assert_json_error(self, result, msg, status_code=400):
        """
        Invalid POSTs return an error status code and JSON of the form
        {"result": "error", "msg": "reason"}.

        The error message must match `msg` exactly; see
        assert_json_error_contains for substring matching.
        """
        self.assertEqual(self.get_json_error(result, status_code=status_code), msg)
    def assert_length(self, queries, count, exact=False):
        # Assert len(queries) == count when exact, otherwise that it does not
        # exceed count (useful as an upper bound on database query counts).
        actual_count = len(queries)
        if exact:
            return self.assertTrue(actual_count == count,
                                   "len(%s) == %s, != %s" % (queries, actual_count, count))
        return self.assertTrue(actual_count <= count,
                               "len(%s) == %s, > %s" % (queries, actual_count, count))
    def assert_json_error_contains(self, result, msg_substring):
        # Like assert_json_error, but only requires the message to contain
        # the given substring (default status code 400).
        self.assertIn(msg_substring, self.get_json_error(result))
def fixture_data(self, type, action, file_type='json'):
return open(os.path.join(os.path.dirname(__file__),
"../fixtures/%s/%s_%s.%s" % (type, type, action,file_type))).read()
# Subscribe to a stream directly
def subscribe_to_stream(self, email, stream_name, realm=None):
realm = get_realm(resolve_email_to_domain(email))
stream, _ = create_stream_if_needed(realm, stream_name)
user_profile = get_user_profile_by_email(email)
do_add_subscription(user_profile, stream, no_log=True)
# Subscribe to a stream by making an API request
def common_subscribe_to_streams(self, email, streams, extra_post_data = {}, invite_only=False):
post_data = {'subscriptions': ujson.dumps([{"name": stream} for stream in streams]),
'invite_only': ujson.dumps(invite_only)}
post_data.update(extra_post_data)
result = self.client.post("/api/v1/users/me/subscriptions", post_data, **self.api_auth(email))
return result
def send_json_payload(self, email, url, payload, stream_name=None, **post_params):
if stream_name != None:
self.subscribe_to_stream(email, stream_name)
result = self.client.post(url, payload, **post_params)
self.assert_json_success(result)
# Check the correct message was sent
msg = Message.objects.filter().order_by('-id')[0]
self.assertEqual(msg.sender.email, email)
self.assertEqual(get_display_recipient(msg.recipient), stream_name)
return msg
| 1.804688 | 2 |
webtemplate_dbca/tests/urls.py | parksandwildlife/webtemplate | 0 | 12758481 | <reponame>parksandwildlife/webtemplate
from django.urls import path
from .views import TestPage, TestDBCAPage, TestPage2, TestInternetPage, TestB4Page, TestB5Page
# Routes used only by the template package's test suite; each maps a
# throwaway path to one of the test views imported above.
urlpatterns = [
    path('test/', TestPage.as_view(), name='test_page'),
    path('test-dbca/', TestDBCAPage.as_view(), name='test_dbca_page'),
    path('test2/', TestPage2.as_view(), name='test_page_2'),
    path('test-internet/', TestInternetPage.as_view(), name='test_internet_page'),
    path('test-b4/', TestB4Page.as_view(), name='test_page_b4'),
    path('test-b5/', TestB5Page.as_view(), name='test_page_b5'),
    # We need the following named URLs to render the base template.
    path('login/', TestPage.as_view(), name='login'),
    path('logout/', TestPage.as_view(), name='logout'),
]
| 2.03125 | 2 |
Projetos/desafios/desa112/utilidades/dado/__init__.py | LucasDeAndradeMarin/Marin-python-training | 0 | 12758482 | <filename>Projetos/desafios/desa112/utilidades/dado/__init__.py<gh_stars>0
def leiaDinheiro(msg):
    """Prompt with *msg* until the user types a valid price and return it
    as a float.  A comma may be used as the decimal separator."""
    while True:
        din = str(input(msg)).strip().replace(',', '.')
        try:
            # Bug fix: let float() decide validity.  The old check only
            # rejected all-alphabetic or empty input, so mixed input such
            # as "1a2" slipped through and crashed on float(din).
            return float(din)
        except ValueError:
            print(f'\033[0;31mERRO! \"{din}\" é um preço inválido!\033[m')
QPtomographer/_version.py | Tomographer/QPtomographer | 2 | 12758483 | <reponame>Tomographer/QPtomographer<filename>QPtomographer/_version.py
# Package version, exposed both as a display string and as numeric
# major/minor components for programmatic comparison.
version = "1.0"
version_maj = 1
version_min = 0
| 1.007813 | 1 |
scrumate/core/issue/views.py | nahidsaikat/scrumate | 1 | 12758484 | from django.conf import settings
from django.contrib import messages
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.decorators import login_required, permission_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.views.generic import DetailView
from scrumate.core.issue.filters import IssueFilter
from scrumate.core.issue.models import Issue
from scrumate.core.issue.forms import IssueForm
from scrumate.core.project.models import Project
from scrumate.general.views import HistoryList
@login_required(login_url='/login/')
def issue_list(request, project_id, **kwargs):
    """List a project's issues, newest first, with filtering and pagination."""
    issue_filter = IssueFilter(request.GET, queryset=Issue.objects.filter(project_id=project_id).order_by('-id'))
    issue_list = issue_filter.qs
    page = request.GET.get('page', 1)
    paginator = Paginator(issue_list, settings.PAGE_SIZE)

    try:
        issues = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        issues = paginator.page(1)
    except EmptyPage:
        # Page number past the end: serve the last page instead.
        issues = paginator.page(paginator.num_pages)

    project = Project.objects.get(pk=project_id)
    return render(request, 'core/issue_list.html', {'issues': issues, 'filter': issue_filter, 'project': project})
@login_required(login_url='/login/')
def issue_add(request, project_id, **kwargs):
    """Create a new issue in the given project; GET renders the empty form."""
    if request.method == 'POST':
        form = IssueForm(request.POST)
        if form.is_valid():
            issue = form.save(commit=False)
            # Attach the issue to the project taken from the URL, not the form.
            issue.project_id = project_id
            issue.save()
            messages.success(request, "Issue added successfully!")
            return redirect('issue_list', permanent=True, project_id=project_id)
    else:
        form = IssueForm()
    title = 'New Issue'
    project = Project.objects.get(pk=project_id)
    return render(request, 'core/common_add.html', {'form': form, 'title': title, 'list_url_name': 'issue_list', 'project': project})
@login_required(login_url='/login/')
def issue_edit(request, project_id, pk, **kwargs):
    """Edit an existing issue; 404s when the issue does not exist."""
    instance = get_object_or_404(Issue, id=pk)
    form = IssueForm(request.POST or None, instance=instance)
    if form.is_valid():
        form.save()
        messages.success(request, "Issue updated successfully!")
        return redirect('issue_list', project_id=project_id)
    title = 'Edit Issue'
    project = Project.objects.get(pk=project_id)
    return render(request, 'core/common_add.html', {'form': form, 'title': title, 'list_url_name': 'issue_list', 'project': project})
@login_required(login_url='/login/')
@permission_required('core.update_issue_status', raise_exception=True)
def update_issue_status(request, project_id, pk, **kwargs):
    """Render and handle the single-field form that changes an issue's status."""
    instance = get_object_or_404(Issue, id=pk)
    form = IssueForm(request.POST or None, instance=instance)
    if request.POST:
        status = request.POST.get('status')
        instance.status = status
        instance.save()
        # Bug fix: the success message previously read "successfurrl".
        messages.success(request, "Issue status updated successfully!")
        return redirect('issue_list', project_id=project_id)

    return render(request, 'includes/single_field.html', {
        # NOTE(review): index 5 assumes 'status' is the sixth visible field
        # of IssueForm -- confirm against the form definition.
        'field': form.visible_fields()[5],
        'title': 'Update Status',
        'url': reverse('issue_list', kwargs={'project_id': project_id}),
        'project': Project.objects.get(pk=project_id),
        'base_template': 'general/index_project_view.html'
    })
class IssueHistoryList(HistoryList):
    """Change-history view for a single issue (backed by django-simple-history
    style Issue.history records)."""
    permission_required = 'scrumate.core.issue_history'

    def get_issue_id(self):
        # Issue primary key from the URL kwargs.
        return self.kwargs.get('pk')

    def get_project_id(self):
        # Project primary key from the URL kwargs.
        return self.kwargs.get('project_id')

    def get_queryset(self):
        return Issue.history.filter(id=self.get_issue_id())

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        project = Project.objects.get(pk=self.get_project_id())
        issue = Issue.objects.get(pk=self.get_issue_id())
        context['project'] = project
        context['title'] = f'History of {issue.name}'
        context['back_url'] = reverse('issue_list', kwargs={'project_id': self.get_project_id()})
        context['base_template'] = 'general/index_project_view.html'
        return context
class IssueDetailView(DetailView):
    """Read-only detail page for an issue, rendered with a disabled IssueForm
    through the generic view template."""
    queryset = Issue.objects.all()
    template_name = 'includes/generic_view.html'
    context_object_name = 'issue'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        project_id = self.kwargs.get('project_id')
        instance = self.get_object()
        context['form'] = IssueForm(instance=instance)
        context['edit_url'] = reverse('issue_edit', kwargs={'project_id': project_id, 'pk': instance.pk})
        context['list_url'] = reverse('issue_list', kwargs={'project_id': project_id})
        context['title'] = instance.name
        context['project'] = Project.objects.get(pk=project_id)
        context['base_template'] = 'general/index_project_view.html'
        return context
| 2.046875 | 2 |
Lab_3/cae.py | gradampl/MONTY | 0 | 12758485 | <gh_stars>0
# Map each ASCII digit to its Polish word.
digits = {'0': 'zero', '1': 'jeden', '2': 'dwa', '3': 'trzy', '4': 'cztery', '5': 'pięć', \
          '6': 'sześć', '7': 'siedem', '8': 'osiem', '9': 'dziewięć'}

user_input = input("Wpisz cyfry, a ja zamienię je na słowa: \n")
# Echo each digit back as a word, separated by spaces; skip everything else.
for character in user_input:
    if character in digits:
        print(digits[character], end=' ')
| 3.4375 | 3 |
autograd/numpy/numpy_wrapper.py | cassianobecker/tgcn | 0 | 12758486 | <reponame>cassianobecker/tgcn
from __future__ import absolute_import
import types
import warnings
from autograd.extend import primitive, notrace_primitive
import numpy as _np
import autograd.builtins as builtins
from numpy.core.einsumfunc import _parse_einsum_input
notrace_functions = [
_np.ndim, _np.shape, _np.iscomplexobj, _np.result_type
]
def wrap_intdtype(cls):
    # Subclass a numpy integer dtype so that instantiating it goes through
    # notrace_primitive and is therefore invisible to autograd's tracer.
    class IntdtypeSubclass(cls):
        __new__ = notrace_primitive(cls.__new__)
    return IntdtypeSubclass
def wrap_namespace(old, new):
    """Copy numpy's namespace `old` into `new`, wrapping callables as autograd
    primitives so that calls on traced values are recorded on the tape.
    Functions in notrace_functions are wrapped to be ignored by the tracer;
    integer dtypes get a traced-safe subclass; plain constants pass through."""
    unchanged_types = {float, int, type(None), type}
    int_types = {_np.int, _np.int8, _np.int16, _np.int32, _np.int64, _np.integer}
    function_types = {_np.ufunc, types.FunctionType, types.BuiltinFunctionType}
    for name, obj in old.items():
        if obj in notrace_functions:
            new[name] = notrace_primitive(obj)
        elif type(obj) in function_types:
            new[name] = primitive(obj)
        elif type(obj) is type and obj in int_types:
            new[name] = wrap_intdtype(obj)
        elif type(obj) in unchanged_types:
            new[name] = obj

# Populate this module's globals with the wrapped numpy namespace.
wrap_namespace(_np.__dict__, globals())
# ----- Special treatment of list-input functions -----
# concatenate takes a *list* argument, which the primitive machinery cannot
# differentiate through directly, so it is re-expressed in *args form.
@primitive
def concatenate_args(axis, *args):
    return _np.concatenate(args, axis).view(ndarray)
concatenate = lambda arr_list, axis=0 : concatenate_args(axis, *arr_list)
vstack = row_stack = lambda tup: concatenate([atleast_2d(_m) for _m in tup], axis=0)
def hstack(tup):
    # Mirror numpy.hstack: 1-D inputs are joined along axis 0, everything
    # else along axis 1, but using the traced concatenate defined above.
    arrs = [atleast_1d(_m) for _m in tup]
    if arrs[0].ndim == 1:
        return concatenate(arrs, 0)
    return concatenate(arrs, 1)
def column_stack(tup):
    # Mirror numpy.column_stack: promote 1-D inputs to columns, then join
    # along axis 1 using the traced concatenate.
    arrays = []
    for v in tup:
        arr = array(v)
        if arr.ndim < 2:
            arr = array(arr, ndmin=2).T
        arrays.append(arr)
    return concatenate(arrays, 1)
def array(A, *args, **kwargs):
    # Building an array from a list/tuple must recurse element-wise so that
    # boxed (traced) elements are handled; scalars and arrays go straight
    # through a single primitive call.
    t = builtins.type(A)
    if t in (list, tuple):
        return array_from_args(args, kwargs, *map(array, A))
    else:
        return _array_from_scalar_or_array(args, kwargs, A)
def wrap_if_boxes_inside(raw_array, slow_op_name=None):
    # An object-dtype result means autograd boxes leaked into the array;
    # rebuild it element-wise (slow) so tracing still works.
    if raw_array.dtype is _np.dtype('O'):
        if slow_op_name:
            warnings.warn("{0} is slow for array inputs. "
                          "np.concatenate() is faster.".format(slow_op_name))
        return array_from_args((), {}, *raw_array.ravel()).reshape(raw_array.shape)
    else:
        return raw_array
# Primitive wrapper so array construction from a scalar/array is traced.
@primitive
def _array_from_scalar_or_array(array_args, array_kwargs, scalar):
    return _np.array(scalar, *array_args, **array_kwargs)
# Primitive wrapper for building an array from individually-traced elements.
@primitive
def array_from_args(array_args, array_kwargs, *args):
    return _np.array(args, *array_args, **array_kwargs)
def select(condlist, choicelist, default=0):
    # numpy.select returns a plain ndarray; rebuild it element-wise so any
    # traced values from choicelist stay boxed.
    raw_array = _np.select(list(condlist), list(choicelist), default=default)
    return array(list(raw_array.ravel())).reshape(raw_array.shape)
def stack(arrays, axis=0):
    # this code is basically copied from numpy/core/shape_base.py's stack
    # we need it here because we want to re-implement stack in terms of the
    # primitives defined in this file

    arrays = [array(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')

    shapes = set(arr.shape for arr in arrays)
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    if not -result_ndim <= axis < result_ndim:
        raise IndexError('axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim))
    if axis < 0:
        axis += result_ndim

    # Insert a new length-1 axis at `axis` on each input, then join there.
    sl = (slice(None),) * axis + (None,)
    return concatenate([arr[sl] for arr in arrays], axis=axis)
def append(arr, values, axis=None):
    # this code is basically copied from numpy/lib/function_base.py's append
    arr = array(arr)
    if axis is None:
        # Flatten both operands when no axis is given (numpy semantics).
        if ndim(arr) != 1:
            arr = ravel(arr)
        values = ravel(array(values))
        axis = ndim(arr) - 1
    return concatenate((arr, values), axis=axis)
# ----- Enable functions called using [] ----
# Traced stand-ins for numpy's r_ / c_ index-expression builders; delegate
# to numpy, then re-box element-wise if traced values ended up inside.
class r_class():
    def __getitem__(self, args):
        raw_array = _np.r_[args]
        return wrap_if_boxes_inside(raw_array, slow_op_name = "r_")
r_ = r_class()

class c_class():
    def __getitem__(self, args):
        raw_array = _np.c_[args]
        return wrap_if_boxes_inside(raw_array, slow_op_name = "c_")
c_ = c_class()
# ----- misc -----
@primitive
def make_diagonal(D, offset=0, axis1=0, axis2=1):
    # Numpy doesn't offer a complement to np.diagonal: a function to create new
    # diagonal arrays with extra dimensions. We need such a function for the
    # gradient of np.diagonal and it's also quite handy to have. So here it is.
    if not (offset==0 and axis1==-1 and axis2==-2):
        raise NotImplementedError("Currently make_diagonal only supports offset=0, axis1=-1, axis2=-2")

    # We use a trick: calling np.diagonal returns a view on the original array,
    # so we can modify it in-place. (only valid for numpy version >= 1.10.)
    new_array = _np.zeros(D.shape + (D.shape[-1],))
    new_array_diag = _np.diagonal(new_array, offset=0, axis1=-1, axis2=-2)
    new_array_diag.flags.writeable = True
    new_array_diag[:] = D
    return new_array
# Shape/ndim/dtype/complex-ness of an array in one untraced call.
@notrace_primitive
def metadata(A):
    return _np.shape(A), _np.ndim(A), _np.result_type(A), _np.iscomplexobj(A)
# Untraced passthrough to numpy's einsum-argument parser.
@notrace_primitive
def parse_einsum_input(*args):
    return _parse_einsum_input(args)
@primitive
def _astype(A, dtype, order='K', casting='unsafe', subok=True, copy=True):
    # Bug fix: the default was 'filter_order', which is not a valid value for
    # ndarray.astype's order argument ({'C','F','A','K'}); every default call
    # would raise.  'K' is numpy's own default (preserve memory layout).
    return A.astype(dtype, order, casting, subok, copy)
| 2.0625 | 2 |
venv/lib/python3.8/site-packages/black/handle_ipynb_magics.py | matthewalunni/saas-template-django | 1 | 12758487 | <reponame>matthewalunni/saas-template-django
"""Functions to process IPython magics with."""
from functools import lru_cache
import dataclasses
import ast
from typing import Dict, List, Tuple, Optional
import secrets
import sys
import collections
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
from black.report import NothingChanged
from black.output import out
# Call prefixes that IPython's transformer emits in place of magics; a line
# starting with one of these is IPython-generated, not user code.
TRANSFORMED_MAGICS = frozenset(
    (
        "get_ipython().run_cell_magic",
        "get_ipython().system",
        "get_ipython().getoutput",
        "get_ipython().run_line_magic",
    )
)
# Token names that carry no semantic content when scanning for a trailing
# semicolon (whitespace, comments, markers).
TOKENS_TO_IGNORE = frozenset(
    (
        "ENDMARKER",
        "NL",
        "NEWLINE",
        "COMMENT",
        "DEDENT",
        "UNIMPORTANT_WS",
        "ESCAPED_NL",
    )
)
# Cell magics whose bodies are not Python and therefore must not be formatted.
NON_PYTHON_CELL_MAGICS = frozenset(
    (
        "%%bash",
        "%%html",
        "%%javascript",
        "%%js",
        "%%latex",
        "%%markdown",
        "%%perl",
        "%%ruby",
        "%%script",
        "%%sh",
        "%%svg",
        "%%writefile",
    )
)
@dataclasses.dataclass(frozen=True)
class Replacement:
    # mask: the random token written into the source in place of a magic.
    # src: the original magic text the mask stands in for.
    mask: str
    src: str
@lru_cache()
def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
    # Cached so the "skipping .ipynb" message is printed at most once per run.
    try:
        import IPython  # noqa:F401
        import tokenize_rt  # noqa:F401
    except ModuleNotFoundError:
        if verbose or not quiet:
            msg = (
                "Skipping .ipynb files as Jupyter dependencies are not installed.\n"
                "You can fix this by running ``pip install black[jupyter]``"
            )
            out(msg)
        return False
    else:
        return True
def remove_trailing_semicolon(src: str) -> Tuple[str, bool]:
    """Remove trailing semicolon from Jupyter notebook cell.
    For example,
        fig, ax = plt.subplots()
        ax.plot(x_data, y_data);  # plot data
    would become
        fig, ax = plt.subplots()
        ax.plot(x_data, y_data)  # plot data
    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.
    Returns the (possibly modified) source and whether a semicolon was removed.
    """
    from tokenize_rt import (
        src_to_tokens,
        tokens_to_src,
        reversed_enumerate,
    )

    tokens = src_to_tokens(src)
    trailing_semicolon = False
    # Scan backwards past whitespace/comment tokens for a final ';'.
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        if token.name == "OP" and token.src == ";":
            del tokens[idx]
            trailing_semicolon = True
        break
    if not trailing_semicolon:
        return src, False
    return tokens_to_src(tokens), True
def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
    """Put trailing semicolon back if cell originally had it.
    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.
    """
    if not has_trailing_semicolon:
        return src
    from tokenize_rt import src_to_tokens, tokens_to_src, reversed_enumerate

    tokens = src_to_tokens(src)
    # Re-append ';' to the last semantic token (skipping whitespace/comments).
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        tokens[idx] = token._replace(src=token.src + ";")
        break
    else:  # pragma: nocover
        raise AssertionError(
            "INTERNAL ERROR: Was not able to reinstate trailing semicolon. "
            "Please report a bug on https://github.com/psf/black/issues.  "
        ) from None
    return str(tokens_to_src(tokens))
def mask_cell(src: str) -> Tuple[str, List[Replacement]]:
    """Mask IPython magics so content becomes parseable Python code.
    For example,
        %matplotlib inline
        'foo'
    becomes
        "25716f358c32750e"
        'foo'
    The replacements are returned, along with the transformed code.
    """
    replacements: List[Replacement] = []
    try:
        ast.parse(src)
    except SyntaxError:
        # Might have IPython magics, will process below.
        pass
    else:
        # Syntax is fine, nothing to mask, early return.
        return src, replacements

    from IPython.core.inputtransformer2 import TransformerManager

    transformer_manager = TransformerManager()
    # First pass turns any cell magic into a run_cell_magic(...) call.
    transformed = transformer_manager.transform_cell(src)
    transformed, cell_magic_replacements = replace_cell_magics(transformed)
    replacements += cell_magic_replacements
    # Second pass handles line magics inside the (possibly unwrapped) body.
    transformed = transformer_manager.transform_cell(transformed)
    transformed, magic_replacements = replace_magics(transformed)
    if len(transformed.splitlines()) != len(src.splitlines()):
        # Multi-line magic, not supported.
        raise NothingChanged
    replacements += magic_replacements
    return transformed, replacements
def get_token(src: str, magic: str) -> str:
    """Return randomly generated token to mask IPython magic with.

    For example, if 'magic' was `%matplotlib inline`, then a possible
    token to mask it with would be `"43fdd17f7e5ddc83"`. The token
    will be the same length as the magic, and we make sure that it was
    not already present anywhere else in the cell.
    """
    assert magic
    num_bytes = max(len(magic) // 2 - 1, 1)
    candidate = secrets.token_hex(num_bytes)
    attempts = 0
    # Regenerate until the token is absent from the cell source.
    while candidate in src:  # pragma: nocover
        attempts += 1
        if attempts > 100:
            raise AssertionError(
                "INTERNAL ERROR: Black was not able to replace IPython magic. "
                "Please report a bug on https://github.com/psf/black/issues. "
                f"The magic might be helpful: {magic}"
            ) from None
        candidate = secrets.token_hex(num_bytes)
    # Pad with '.' so that, including quotes, the mask matches the magic's length.
    if len(candidate) + 2 < len(magic):
        candidate = f"{candidate}."
    return f'"{candidate}"'
def replace_cell_magics(src: str) -> Tuple[str, List[Replacement]]:
    """Replace cell magic with token.
    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.
    Example,
        get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n')
    becomes
        "a794."
        ls =!ls
    The replacement, along with the transformed code, is returned.
    """
    replacements: List[Replacement] = []

    tree = ast.parse(src)

    cell_magic_finder = CellMagicFinder()
    cell_magic_finder.visit(tree)
    if cell_magic_finder.cell_magic is None:
        return src, replacements
    # Non-Python cell bodies (e.g. %%bash) must not be formatted at all.
    if cell_magic_finder.cell_magic.header.split()[0] in NON_PYTHON_CELL_MAGICS:
        raise NothingChanged
    mask = get_token(src, cell_magic_finder.cell_magic.header)
    replacements.append(Replacement(mask=mask, src=cell_magic_finder.cell_magic.header))
    return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
def replace_magics(src: str) -> Tuple[str, List[Replacement]]:
    """Replace magics within body of cell.
    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.
    Example, this
        get_ipython().run_line_magic('matplotlib', 'inline')
        'foo'
    becomes
        "5e67db56d490fd39"
        'foo'
    The replacement, along with the transformed code, are returned.
    """
    replacements = []
    magic_finder = MagicFinder()
    magic_finder.visit(ast.parse(src))
    new_srcs = []
    # Walk line-by-line, substituting a mask token at each recorded magic.
    for i, line in enumerate(src.splitlines(), start=1):
        if i in magic_finder.magics:
            offsets_and_magics = magic_finder.magics[i]
            if len(offsets_and_magics) != 1:  # pragma: nocover
                raise AssertionError(
                    f"Expecting one magic per line, got: {offsets_and_magics}\n"
                    "Please report a bug on https://github.com/psf/black/issues."
                )
            col_offset, magic = (
                offsets_and_magics[0].col_offset,
                offsets_and_magics[0].magic,
            )
            mask = get_token(src, magic)
            replacements.append(Replacement(mask=mask, src=magic))
            line = line[:col_offset] + mask
        new_srcs.append(line)
    return "\n".join(new_srcs), replacements
def unmask_cell(src: str, replacements: List[Replacement]) -> str:
    """Remove replacements from cell.

    For example
        "9b20"
        foo = bar
    becomes
        %%time
        foo = bar
    """
    restored = src
    for replacement in replacements:
        restored = restored.replace(replacement.mask, replacement.src)
    return restored
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
"""Check if attribute is IPython magic.
Note that the source of the abstract syntax tree
will already have been processed by IPython's
TransformerManager().transform_cell.
"""
return (
isinstance(node, ast.Attribute)
and isinstance(node.value, ast.Call)
and isinstance(node.value.func, ast.Name)
and node.value.func.id == "get_ipython"
)
@dataclasses.dataclass(frozen=True)
class CellMagic:
    # header: the '%%name [args]' first line; body: the rest of the cell.
    header: str
    body: str
@dataclasses.dataclass
class CellMagicFinder(ast.NodeVisitor):
    """Find cell magics.
    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.
    For example,
        %%time\nfoo()
    would have been transformed to
        get_ipython().run_cell_magic('time', '', 'foo()\\n')
    and we look for instances of the latter.
    """

    cell_magic: Optional[CellMagic] = None

    def visit_Expr(self, node: ast.Expr) -> None:
        """Find cell magic, extract header and body."""
        if (
            isinstance(node.value, ast.Call)
            and _is_ipython_magic(node.value.func)
            and node.value.func.attr == "run_cell_magic"
        ):
            args = []
            for arg in node.value.args:
                assert isinstance(arg, ast.Str)
                args.append(arg.s)
            # args are (name, line-arguments, body) of the cell magic.
            header = f"%%{args[0]}"
            if args[1]:
                header += f" {args[1]}"
            self.cell_magic = CellMagic(header=header, body=args[2])
        self.generic_visit(node)
@dataclasses.dataclass(frozen=True)
class OffsetAndMagic:
    # col_offset: column where the magic starts on its line.
    # magic: the reconstructed user-facing magic text (e.g. '%matplotlib inline').
    col_offset: int
    magic: str
@dataclasses.dataclass
class MagicFinder(ast.NodeVisitor):
    """Visit cell to look for get_ipython calls.
    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.
    For example,
        %matplotlib inline
    would have been transformed to
        get_ipython().run_line_magic('matplotlib', 'inline')
    and we look for instances of the latter (and likewise for other
    types of magics).
    """

    # Maps 1-based line number -> magics found on that line.
    magics: Dict[int, List[OffsetAndMagic]] = dataclasses.field(
        default_factory=lambda: collections.defaultdict(list)
    )

    def visit_Assign(self, node: ast.Assign) -> None:
        """Look for system assign magics.
        For example,
            black_version = !black --version
        would have been transformed to
            black_version = get_ipython().getoutput('black --version')
        and we look for instances of the latter.
        """
        if (
            isinstance(node.value, ast.Call)
            and _is_ipython_magic(node.value.func)
            and node.value.func.attr == "getoutput"
        ):
            args = []
            for arg in node.value.args:
                assert isinstance(arg, ast.Str)
                args.append(arg.s)
            assert args
            src = f"!{args[0]}"
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)

    def visit_Expr(self, node: ast.Expr) -> None:
        """Look for magics in body of cell.
        For examples,
            !ls
            !!ls
            ?ls
            ??ls
        would (respectively) get transformed to
            get_ipython().system('ls')
            get_ipython().getoutput('ls')
            get_ipython().run_line_magic('pinfo', 'ls')
            get_ipython().run_line_magic('pinfo2', 'ls')
        and we look for instances of any of the latter.
        """
        if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
            args = []
            for arg in node.value.args:
                assert isinstance(arg, ast.Str)
                args.append(arg.s)
            assert args
            # Reconstruct the original user-facing spelling of the magic.
            if node.value.func.attr == "run_line_magic":
                if args[0] == "pinfo":
                    src = f"?{args[1]}"
                elif args[0] == "pinfo2":
                    src = f"??{args[1]}"
                else:
                    src = f"%{args[0]}"
                    if args[1]:
                        assert src is not None
                        src += f" {args[1]}"
            elif node.value.func.attr == "system":
                src = f"!{args[0]}"
            elif node.value.func.attr == "getoutput":
                src = f"!!{args[0]}"
            else:
                raise NothingChanged  # unsupported magic.
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)
| 2.21875 | 2 |
ourstylePy/our_palettes.py | PeterGrahamJersey/ourstylePy | 0 | 12758488 | import data
import our_colours
def our_palettes(palette = None, n = None, reverse = False):
    '''
    Access our colour palettes as hexcodes
    - palette: string, which palette should be accessed, should match a name from our_palettes_raw
    - n: integer, number of colours to generate from palette
    - reverse: boolean, should the order of colours be reversed?
    Returns: If palette is None, return the raw palette data. If n is None, return
    the hexcodes of colours in the data, otherwise return n colours interpolated
    from the chosen palette
    Examples:
    our_palettes()
    our_palettes('default')
    our_palettes('default', reverse=True)
    our_palettes('default', 10)
    our_palettes('default', 2)
    '''
    if palette is None:
        return data.our_palettes_raw
    else:
        if n is None:
            pal = our_colours.our_colours(data.our_palettes_raw[palette])
            if reverse:
                # Bug fix: `rev` is an R builtin, not Python -- the old code
                # raised NameError whenever reverse=True.
                pal = pal[::-1]
            return pal
        else:
            # NOTE(review): our_palettes_interpolator is not imported here --
            # presumably defined later in this module; confirm.
            return our_palettes_interpolator(palette, reverse)(n)
| 3.34375 | 3 |
cftool/ml/param_utils/core.py | SaizhuoWang/carefree-toolkit | 5 | 12758489 | import math
import numpy as np
from typing import Dict
from typing import List
from typing import Union
from typing import Iterator
from typing import Optional
from .types import *
from .data_types import *
from .normalizers import *
from .distributions import *
from ...misc import *
# A params spec is a (possibly nested) dict mapping names to DataType /
# Iterable search-space descriptions.
params_type = Dict[str, Union[DataType, Iterable, "params_type"]]


class ParamsGenerator:
    """
    Parameter generator for param searching, see cftool.ml.hpo.base.HPOBase for usage.

    Parameters
    ----------
    params : params_type, parameter settings.

    Examples
    ----------
    >>> grid = ParamsGenerator({
    >>>     "a": Any(Choice(values=[1, 2, 3])),
    >>>     "c": {
    >>>         "d": Int(Choice(values=[1, 2, 3])),
    >>>         "e": Float(Choice(values=[1, 2])),
    >>>     }
    >>> })
    >>> for param in grid.all():
    >>>     print(param)
    >>> # output : {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 1, 'f': 4}}
    >>> #          {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 1, 'e': 2, 'f': 4}}
    >>> #          {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 1, 'f': 4}}
    >>> #          {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 3}}, {'a': 1, 'c': {'d': 2, 'e': 2, 'f': 4}}
    >>> # ......
    >>> #          {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 3}}, {'a': 3, 'c': {'d': 3, 'e': 2, 'f': 4}}

    """

    def __init__(
        self,
        params: params_type,
        *,
        normalize_method: Optional[str] = None,
        normalize_config: Optional[Dict[str, Any]] = None,
    ):
        self._data_types = params

        # Each DataType occupies one slot in the flat array representation;
        # an Iterable occupies one slot per element.
        def _data_type_offset(value: DataType) -> int:
            if not isinstance(value, Iterable):
                return 1
            return len(value.values)

        self._data_types_nested = Nested(params, offset_fn=_data_type_offset)
        if normalize_method is None:
            self._normalizers_flattened = None
        else:
            if normalize_config is None:
                normalize_config = {}

            def _data_type_normalizer(value: DataType) -> Normalizer:
                return Normalizer(normalize_method, value, **normalize_config)

            normalizers_nested = self._data_types_nested.apply(_data_type_normalizer)
            self._normalizers_flattened = normalizers_nested.flattened
        # Lazily-computed caches (see the corresponding properties).
        self._all_params_nested = self._all_flattened_data_types = None
        self._array_dim = self._all_bounds = None

    @property
    def params(self) -> params_type:
        return self._data_types

    @property
    def num_params(self) -> number_type:
        # Product of the per-leaf parameter counts; may be math.inf for
        # continuous distributions.
        def _num_params(params):
            if isinstance(params, (DataType, Iterable)):
                return params.num_params
            assert isinstance(params, dict)
            num_params = prod(_num_params(v) for v in params.values())
            if math.isinf(num_params):
                return num_params
            return int(num_params)

        return _num_params(self._data_types)

    @property
    def array_dim(self) -> int:
        # Computed once by round-tripping a popped sample through the
        # flattened-array representation.
        if self._array_dim is None:
            self._array_dim = self.flattened2array(
                self.flatten_nested(self.pop())
            ).shape[0]
        return self._array_dim

    @property
    def all_bounds(self) -> np.ndarray:
        # (array_dim, 2) array of [lower, upper] per flat slot, in the
        # normalized space when normalizers are configured.
        if self._all_bounds is None:
            bounds_list = []
            for key in self.sorted_flattened_keys:
                if self._normalizers_flattened is None:
                    normalizer = None
                else:
                    normalizer = self._normalizers_flattened[key]
                if normalizer is None:
                    data_type = self._data_types_nested.get_value_from(key)
                    if not isinstance(data_type, Iterable):
                        bounds_list.append(list(data_type.bounds))
                    else:
                        bounds_list.extend(list(map(list, data_type.bounds)))
                else:
                    if normalizer.is_iterable:
                        bounds_list.extend(list(map(list, normalizer.bounds)))
                    else:
                        bounds_list.append(list(normalizer.bounds))
            self._all_bounds = np.array(bounds_list, np.float32)
        return self._all_bounds

    @property
    def all_flattened_params(self) -> all_flattened_type:
        if self._all_params_nested is None:
            apply = lambda data_type: data_type.all()
            self._all_params_nested = self._data_types_nested.apply(apply)
        return self._all_params_nested.flattened

    @property
    def sorted_flattened_keys(self) -> List[str]:
        return self._data_types_nested.sorted_flattened_keys

    def pop(self) -> nested_type:
        # Draw one random sample from every leaf, preserving the nesting.
        def _pop(src: dict, tgt: dict):
            for k, v in src.items():
                if isinstance(v, dict):
                    next_tgt = tgt.setdefault(k, {})
                    _pop(v, next_tgt)
                else:
                    tgt[k] = v.pop()
            return tgt

        return _pop(self._data_types, {})

    def all(self) -> Iterator[nested_type]:
        # Exhaustively enumerate the full cartesian grid of parameter values.
        for flattened_params in Grid(self.all_flattened_params):
            yield self._data_types_nested.nest_flattened(flattened_params)

    def flatten_nested(self, nested: nested_type) -> nested_type:
        return self._data_types_nested.flatten_nested(nested)

    def nest_flattened(self, flattened: flattened_type) -> nested_type:
        return self._data_types_nested.nest_flattened(flattened)

    def flattened2array(self, flattened: flattened_type) -> np.ndarray:
        # Normalize (when configured) before packing into a flat array.
        if self._normalizers_flattened is None:
            normalized = flattened
        else:
            normalized = {
                k: self._normalizers_flattened[k].normalize(v)
                for k, v in flattened.items()
            }
        return self._data_types_nested.flattened2array(normalized)

    def array2flattened(self, array: np.ndarray) -> flattened_type:
        # Inverse of flattened2array: denormalize, then let each DataType
        # cast the raw float back to its own value type.
        normalized = self._data_types_nested.array2flattened(array)
        if self._normalizers_flattened is None:
            flattened = normalized
        else:
            flattened = {
                k: self._normalizers_flattened[k].recover(v)
                for k, v in normalized.items()
            }
        for key, value in flattened.items():
            data_type = self._data_types_nested.get_value_from(key)
            flattened[key] = data_type.transform(value)
        return flattened


__all__ = ["ParamsGenerator", "params_type"]
| 2.546875 | 3 |
Housing_Analysis_Data Science/Housing_analysis/Housing_data_scrap.py | WajeehAhmed/Housing-Price-Analysis | 0 | 12758490 | from bs4 import BeautifulSoup as soup
import requests
import re
from word2number import w2n
import pandas as pd
# Scrape house listings for Lahore from zameen.com and dump them to dataset.csv.
URL = 'https://www.zameen.com/Houses_Property/Lahore-1-1.html'
response = requests.get(URL)

Price = []
Location = []
Beds = []
Size = []

# Pin the parser: relying on BeautifulSoup's auto-detection makes results
# depend on which parsers happen to be installed.
data = soup(response.text, 'html.parser')
# Each listing is rendered as an <li role="article"> card.
for info in data.find_all('li', role='article'):
    price_tags = info.find_all('span', class_='f343d9ce')
    location_tags = info.find_all('div', class_='_162e6469')
    bed_tags = info.find_all('span', class_='b6a29bc0')
    size_tags = info.find_all('h2', class_='c0df3811')
    # Skip incomplete cards instead of crashing on a missing field.
    if not (price_tags and location_tags and bed_tags and size_tags):
        continue
    Price.append(price_tags[0].text)
    Location.append(location_tags[0].text)
    Beds.append(bed_tags[0].text)
    # Size is rendered like "10 Marla" -> keep only the leading number.
    Size.append(size_tags[0].text.split(' ')[0])

# Convert textual prices ("2.5 Crore" / "80 Lakh") to plain PKR amounts.
for i, raw_price in enumerate(Price):
    amount = float(raw_price.split(' ')[0])
    if str(raw_price).endswith('Crore'):
        Price[i] = amount * pow(10, 7)   # 1 Crore = 10^7 PKR
    else:
        Price[i] = amount * pow(10, 5)   # otherwise assume Lakh = 10^5 PKR

df = pd.DataFrame(
    list(zip(Location, Size, Beds, Price)),
    columns=['Location', 'Size(Marla)', 'Beds', 'Price in Pkr'])
print(df)
df.to_csv('dataset.csv', index=False)
| 3.109375 | 3 |
src/tespy/components/component.py | juliusmeier/tespy | 0 | 12758491 | <reponame>juliusmeier/tespy<filename>src/tespy/components/component.py
# -*- coding: utf-8
"""Module class component.
All tespy components inherit from this class.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/components.py
SPDX-License-Identifier: MIT
"""
import logging
from collections import OrderedDict
import numpy as np
from tespy.tools.characteristics import CharLine
from tespy.tools.characteristics import CharMap
from tespy.tools.characteristics import load_default_char as ldc
from tespy.tools.data_containers import ComponentCharacteristicMaps as dc_cm
from tespy.tools.data_containers import ComponentCharacteristics as dc_cc
from tespy.tools.data_containers import ComponentProperties as dc_cp
from tespy.tools.data_containers import DataContainerSimple as dc_simple
from tespy.tools.data_containers import GroupedComponentCharacteristics as dc_gcc
from tespy.tools.data_containers import GroupedComponentProperties as dc_gcp
from tespy.tools.document_models import generate_latex_eq
from tespy.tools.fluid_properties import v_mix_ph
from tespy.tools.global_vars import err
from tespy.tools.helpers import bus_char_derivative
from tespy.tools.helpers import bus_char_evaluation
from tespy.tools.helpers import newton
# %%
class Component:
r"""
Class Component is the base class of all TESPy components.
Parameters
----------
label : str
The label of the component.
design : list
List containing design parameters (stated as String).
offdesign : list
List containing offdesign parameters (stated as String).
design_path : str
Path to the components design case.
local_offdesign : boolean
Treat this component in offdesign mode in a design calculation.
local_design : boolean
Treat this component in design mode in an offdesign calculation.
char_warnings : boolean
Ignore warnings on default characteristics usage for this component.
printout : boolean
Include this component in the network's results printout.
**kwargs :
See the class documentation of desired component for available
keywords.
Note
----
The initialisation method (__init__), setter method (set_attr) and getter
method (get_attr) are used for instances of class component and its
children.
Allowed keywords in kwargs are 'design_path', 'design' and 'offdesign'.
Additional keywords depend on the type of component you want to create.
Example
-------
Basic example for a setting up a
:py:class:`tespy.components.component.Component` object. This example does
not run a tespy calculation.
>>> from tespy.components.component import Component
>>> comp = Component('myComponent')
>>> type(comp)
<class 'tespy.components.component.Component'>
"""
def __init__(self, label, **kwargs):
# check if components label is of type str and for prohibited chars
if not isinstance(label, str):
msg = 'Component label must be of type str!'
logging.error(msg)
raise ValueError(msg)
elif len([x for x in [';', ',', '.'] if x in label]) > 0:
msg = (
'You must not use ' + str([';', ',', '.']) + ' in label (' +
str(self.component()) + ').')
logging.error(msg)
raise ValueError(msg)
else:
self.label = label
# defaults
self.new_design = True
self.design_path = None
self.design = []
self.offdesign = []
self.local_design = False
self.local_offdesign = False
self.char_warnings = True
self.printout = True
# add container for components attributes
self.variables = OrderedDict(self.get_variables().copy())
self.__dict__.update(self.variables)
self.set_attr(**kwargs)
def set_attr(self, **kwargs):
    r"""
    Set, reset or unset attributes of a component for provided arguments.

    Parameters
    ----------
    design : list
        List containing design parameters (stated as String).

    offdesign : list
        List containing offdesign parameters (stated as String).

    design_path: str
        Path to the components design case.

    **kwargs :
        See the class documentation of desired component for available
        keywords.

    Note
    ----
    Allowed keywords in kwargs are obtained from class documentation as all
    components share the
    :py:meth:`tespy.components.component.Component.set_attr` method.
    """
    # set specified values
    for key in kwargs:
        if key in self.variables.keys():
            data = self.get_attr(key)
            if kwargs[key] is None:
                # None unsets the parameter (and its variable status)
                data.set_attr(is_set=False)
                try:
                    data.set_attr(is_var=False)
                except KeyError:
                    # containers without is_var (e.g. dc_simple)
                    pass
                continue

            # probe whether the value is a number
            try:
                float(kwargs[key])
                is_numeric = True
            except (TypeError, ValueError):
                is_numeric = False

            # dict specification
            if (isinstance(kwargs[key], dict) and
                    not isinstance(data, dc_simple)):
                data.set_attr(**kwargs[key])

            # value specification for component properties
            elif isinstance(data, dc_cp) or isinstance(data, dc_simple):
                if is_numeric:
                    if np.isnan(kwargs[key]):
                        # nan unsets the value
                        data.set_attr(is_set=False)
                        if isinstance(data, dc_cp):
                            data.set_attr(is_var=False)
                    else:
                        data.set_attr(val=kwargs[key], is_set=True)
                        if isinstance(data, dc_cp):
                            data.set_attr(is_var=False)
                elif (kwargs[key] == 'var' and
                      isinstance(data, dc_cp)):
                    # 'var' turns the property into a system variable
                    data.set_attr(is_set=True, is_var=True)
                elif isinstance(data, dc_simple):
                    data.set_attr(val=kwargs[key], is_set=True)
                # invalid datatype for keyword
                else:
                    msg = (
                        'Bad datatype for keyword argument ' + key +
                        ' at ' + self.label + '.')
                    logging.error(msg)
                    raise TypeError(msg)

            elif isinstance(data, dc_cc) or isinstance(data, dc_cm):
                # value specification for characteristics
                if (isinstance(kwargs[key], CharLine) or
                        isinstance(kwargs[key], CharMap)):
                    data.char_func = kwargs[key]
                # invalid datatype for keyword
                else:
                    msg = (
                        'Bad datatype for keyword argument ' + key +
                        ' at ' + self.label + '.')
                    logging.error(msg)
                    raise TypeError(msg)

            elif isinstance(data, dc_gcp):
                # value specification of grouped component parameter method
                if isinstance(kwargs[key], str):
                    data.method = kwargs[key]
                # invalid datatype for keyword
                else:
                    msg = (
                        'Bad datatype for keyword argument ' + key +
                        ' at ' + self.label + '.')
                    logging.error(msg)
                    raise TypeError(msg)

        elif key in ['design', 'offdesign']:
            if not isinstance(kwargs[key], list):
                msg = (
                    'Please provide the ' + key + ' parameters as list '
                    'at ' + self.label + '.')
                logging.error(msg)
                raise TypeError(msg)
            # every listed parameter must exist in the specification
            if set(kwargs[key]).issubset(list(self.variables.keys())):
                self.__dict__.update({key: kwargs[key]})
            else:
                msg = (
                    'Available parameters for (off-)design specification '
                    'are: ' + str(list(self.variables.keys())) + ' at ' +
                    self.label + '.')
                logging.error(msg)
                raise ValueError(msg)

        elif key in ['local_design', 'local_offdesign',
                     'printout', 'char_warnings']:
            if not isinstance(kwargs[key], bool):
                msg = (
                    'Please provide the parameter ' + key + ' as boolean '
                    'at component ' + self.label + '.')
                logging.error(msg)
                raise TypeError(msg)
            else:
                self.__dict__.update({key: kwargs[key]})

        elif key == 'design_path' or key == 'fkt_group':
            if isinstance(kwargs[key], str):
                self.__dict__.update({key: kwargs[key]})
            elif kwargs[key] is None:
                self.design_path = None
            elif np.isnan(kwargs[key]):
                self.design_path = None
            else:
                msg = (
                    'Please provide the design_path parameter as string. '
                    'For unsetting use np.nan or None.')
                logging.error(msg)
                raise TypeError(msg)
            # a new design path invalidates the previous design
            self.new_design = True

        # invalid keyword
        else:
            msg = (
                'Component ' + self.label + ' has no attribute ' +
                str(key) + '.')
            logging.error(msg)
            raise KeyError(msg)
def get_attr(self, key):
r"""
Get the value of a component's attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Value of specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = ('Component ' + self.label + ' has no attribute \"' +
key + '\".')
logging.error(msg)
raise KeyError(msg)
def comp_init(self, nw, num_eq=0):
    r"""
    Perform component initialization in network preprocessing.

    Counts equations and custom variables, loads default characteristics,
    validates grouped parameters and allocates the Jacobian/residual arrays.

    Parameters
    ----------
    nw : tespy.networks.network.Network
        Network this component is integrated in.

    num_eq : int
        Unused here; kept for interface compatibility with subclasses.
    """
    # network-wide information
    self.num_nw_fluids = len(nw.fluids)
    self.nw_fluids = nw.fluids
    self.always_all_equations = nw.always_all_equations
    # system variables per connection: m, p, h plus one per fluid
    self.num_nw_vars = self.num_nw_fluids + 3
    self.it = 0
    self.num_eq = 0
    self.vars = {}
    self.num_vars = 0
    self.constraints = OrderedDict(self.get_mandatory_constraints().copy())
    self.__dict__.update(self.constraints)
    for constraint in self.constraints.values():
        self.num_eq += constraint['num_eq']
    for key, val in self.variables.items():
        data = self.get_attr(key)
        if isinstance(val, dc_cp):
            if data.is_var:
                # register the custom variable's position in the Jacobian
                data.var_pos = self.num_vars
                self.num_vars += 1
                self.vars[data] = key
        # component characteristics: load default line if none was given
        elif isinstance(val, dc_cc):
            if data.char_func is None:
                try:
                    data.char_func = ldc(
                        self.component(), key, 'DEFAULT', CharLine)
                except KeyError:
                    data.char_func = CharLine(x=[0, 1], y=[1, 1])
        # component characteristic maps: load default map if none was given
        elif isinstance(val, dc_cm):
            if data.char_func is None:
                try:
                    data.char_func = ldc(
                        self.component(), key, 'DEFAULT', CharMap)
                except KeyError:
                    # NOTE(review): fallback creates a CharLine although this
                    # container holds a CharMap -- confirm upstream intent.
                    data.char_func = CharLine(x=[0, 1], y=[1, 1])
        # grouped component properties: all elements must be specified
        elif isinstance(val, dc_gcp):
            is_set = True
            for e in data.elements:
                if not self.get_attr(e).is_set:
                    is_set = False
            if is_set:
                data.set_attr(is_set=True)
            elif data.is_set:
                # partially specified group: warn and disable it
                start = (
                    'All parameters of the component group have to be '
                    'specified! This component group uses the following '
                    'parameters: ')
                end = ' at ' + self.label + '. Group will be set to False.'
                logging.warning(start + ', '.join(val.elements) + end)
                val.set_attr(is_set=False)
            else:
                val.set_attr(is_set=False)
        # every set component property contributes its equations
        if data.is_set and data.func is not None:
            self.num_eq += data.num_eq
    # set up Jacobian matrix and residual vector
    self.jacobian = np.zeros((
        self.num_eq,
        self.num_i + self.num_o + self.num_vars,
        self.num_nw_vars))
    self.residual = np.zeros(self.num_eq)
    # constant derivatives can be written once at initialisation
    sum_eq = 0
    for constraint in self.constraints.values():
        num_eq = constraint['num_eq']
        if constraint['constant_deriv']:
            self.jacobian[sum_eq:sum_eq + num_eq] = constraint['deriv']()
        sum_eq += num_eq
    # done
    msg = (
        'The component ' + self.label + ' has ' + str(self.num_vars) +
        ' custom variables.')
    logging.debug(msg)
def get_variables(self):
return {}
def get_mandatory_constraints(self):
return {
'mass_flow_constraints': {
'func': self.mass_flow_func, 'deriv': self.mass_flow_deriv,
'constant_deriv': True, 'latex': self.mass_flow_func_doc,
'num_eq': self.num_i},
'fluid_constraints': {
'func': self.fluid_func, 'deriv': self.fluid_deriv,
'constant_deriv': True, 'latex': self.fluid_func_doc,
'num_eq': self.num_nw_fluids * self.num_i}
}
@staticmethod
def inlets():
return []
@staticmethod
def outlets():
return []
def get_char_expr(self, param, type='rel', inconn=0, outconn=0):
    r"""
    Generic method to access characteristic function parameters.

    Parameters
    ----------
    param : str
        Parameter for characteristic function evaluation.

    type : str
        Type of expression:

        - :code:`rel`: relative to design value
        - :code:`abs`: absolute value

    inconn : int
        Index of inlet connection.

    outconn : int
        Index of outlet connection.

    Returns
    -------
    expr : float
        Value of expression.

        NOTE: for an unknown ``param`` the relative branch raises
        ValueError while the absolute branch returns ``False``.
    """
    if type == 'rel':
        if param == 'm':
            # inlet mass flow relative to its design value
            return (
                self.inl[inconn].m.val_SI / self.inl[inconn].m.design)
        elif param == 'm_out':
            return (
                self.outl[outconn].m.val_SI /
                self.outl[outconn].m.design)
        elif param == 'v':
            # volumetric flow = mass flow * specific volume at inlet state
            v = self.inl[inconn].m.val_SI * v_mix_ph(
                self.inl[inconn].get_flow(),
                T0=self.inl[inconn].T.val_SI)
            return v / self.inl[inconn].v.design
        elif param == 'pr':
            # pressure ratio relative to the design pressure ratio
            return (
                (self.outl[outconn].p.val_SI *
                 self.inl[inconn].p.design) /
                (self.inl[inconn].p.val_SI *
                 self.outl[outconn].p.design))
        else:
            msg = (
                'The parameter ' + str(param) + ' is not available '
                'for characteristic function evaluation.')
            logging.error(msg)
            raise ValueError(msg)
    else:
        if param == 'm':
            return self.inl[inconn].m.val_SI
        elif param == 'm_out':
            return self.outl[outconn].m.val_SI
        elif param == 'v':
            return self.inl[inconn].m.val_SI * v_mix_ph(
                self.inl[inconn].get_flow(),
                T0=self.inl[inconn].T.val_SI)
        elif param == 'pr':
            return (
                self.outl[outconn].p.val_SI /
                self.inl[inconn].p.val_SI)
        else:
            return False

def get_char_expr_doc(self, param, type='rel', inconn=0, outconn=0):
    r"""
    Generic method to access characteristic function parameters.

    Parameters
    ----------
    param : str
        Parameter for characteristic function evaluation.

    type : str
        Type of expression:

        - :code:`rel`: relative to design value
        - :code:`abs`: absolute value

    inconn : int
        Index of inlet connection.

    outconn : int
        Index of outlet connection.

    Returns
    -------
    expr : str
        LaTeX code for documentation.

        NOTE: implicitly returns ``None`` for an unknown ``param``.
    """
    if type == 'rel':
        if param == 'm':
            return (
                r'\frac{\dot{m}_\mathrm{in,' + str(inconn + 1) + r'}}'
                r'{\dot{m}_\mathrm{in,' + str(inconn + 1) +
                r',design}}')
        elif param == 'm_out':
            return (
                r'\frac{\dot{m}_\mathrm{out,' + str(outconn + 1) +
                r'}}{\dot{m}_\mathrm{out,' + str(outconn + 1) +
                r',design}}')
        elif param == 'v':
            return (
                r'\frac{\dot{V}_\mathrm{in,' + str(inconn + 1) + r'}}'
                r'{\dot{V}_\mathrm{in,' + str(inconn + 1) +
                r',design}}')
        elif param == 'pr':
            return (
                r'\frac{p_\mathrm{out,' + str(outconn + 1) +
                r'}\cdot p_\mathrm{in,' + str(inconn + 1) +
                r',design}}{p_\mathrm{out,' + str(outconn + 1) +
                r',design}\cdot p_\mathrm{in,' + str(inconn + 1) +
                r'}}')
    else:
        if param == 'm':
            return r'\dot{m}_\mathrm{in,' + str(inconn + 1) + r'}'
        elif param == 'm_out':
            return r'\dot{m}_\mathrm{out,' + str(outconn + 1) + r'}'
        elif param == 'v':
            return r'\dot{V}_\mathrm{in,' + str(inconn + 1) + r'}'
        elif param == 'pr':
            return (
                r'\frac{p_\mathrm{out,' + str(outconn + 1) +
                r'}}{p_\mathrm{in,' + str(inconn + 1) + r'}}')
def solve(self, increment_filter):
    """
    Solve equations and calculate partial derivatives of a component.

    Fills ``self.residual`` block by block: first the mandatory
    constraints, then every set component parameter that provides an
    equation. Non-constant derivatives are (re-)written into
    ``self.jacobian`` at the matching row offset.

    Parameters
    ----------
    increment_filter : ndarray
        Matrix for filtering non-changing variables.
    """
    sum_eq = 0  # running row offset into residual/jacobian
    for constraint in self.constraints.values():
        num_eq = constraint['num_eq']
        self.residual[sum_eq:sum_eq + num_eq] = constraint['func']()
        if not constraint['constant_deriv']:
            # constant derivatives were written once in comp_init
            constraint['deriv'](increment_filter, sum_eq)
        sum_eq += num_eq
    for parameter, data in self.variables.items():
        if data.is_set and data.func is not None:
            self.residual[sum_eq:sum_eq + data.num_eq] = data.func(
                **data.func_params)
            data.deriv(increment_filter, sum_eq, **data.func_params)
            sum_eq += data.num_eq
def bus_func(self, bus):
r"""
Base method for calculation of the value of the bus function.
Parameters
----------
bus : tespy.connections.bus.Bus
TESPy bus object.
Returns
-------
residual : float
Residual value of bus equation.
"""
return 0
def bus_func_doc(self, bus):
r"""
Base method for LaTeX equation generation of the bus function.
Parameters
----------
bus : tespy.connections.bus.Bus
TESPy bus object.
Returns
-------
latex : str
Bus function in LaTeX format.
"""
return None
def bus_deriv(self, bus):
r"""
Base method for partial derivatives of the bus function.
Parameters
----------
bus : tespy.connections.bus.Bus
TESPy bus object.
Returns
-------
deriv : ndarray
Matrix of partial derivatives.
"""
return np.zeros((1, self.num_i + self.num_o, self.num_nw_vars))
def calc_bus_expr(self, bus):
    r"""
    Return the busses' characteristic line input expression.

    Parameters
    ----------
    bus : tespy.connections.bus.Bus
        Bus to calculate the characteristic function expression for.

    Returns
    -------
    expr : float
        Ratio of power to power design depending on the bus base
        specification.
    """
    # row of the bus' component table that belongs to this component
    b = bus.comps.loc[self]
    if np.isnan(b['P_ref']) or b['P_ref'] == 0:
        # without a reference power the expression defaults to 1
        return 1
    else:
        comp_val = self.bus_func(b)
        if b['base'] == 'component':
            return abs(comp_val / b['P_ref'])
        else:
            # bus-based: solve for the bus value via newton iteration
            bus_value = newton(
                bus_char_evaluation,
                bus_char_derivative,
                [comp_val, b['P_ref'], b['char']], 0,
                val0=b['P_ref'], valmin=-1e15, valmax=1e15)
            return bus_value / b['P_ref']

def calc_bus_efficiency(self, bus):
    r"""
    Return the busses' efficiency.

    Parameters
    ----------
    bus : tespy.connections.bus.Bus
        Bus to calculate the efficiency value on.

    Returns
    -------
    efficiency : float
        Efficiency value of the bus.

        .. math::

            \eta_\mathrm{bus} = \begin{cases}
            \eta\left(
            \frac{\dot{E}_\mathrm{bus}}{\dot{E}_\mathrm{bus,ref}}\right) &
            \text{bus base = 'bus'}\\
            \eta\left(
            \frac{\dot{E}_\mathrm{component}}
            {\dot{E}_\mathrm{component,ref}}\right) &
            \text{bus base = 'component'}
            \end{cases}

    Note
    ----
    If the base value of the bus is the bus value itself, a newton
    iteration is used to find the bus value satisfying the corresponding
    equation (case 1).
    """
    # evaluate the bus characteristic at the (relative) expression value
    return bus.comps.loc[self, 'char'].evaluate(self.calc_bus_expr(bus))

def calc_bus_value(self, bus):
    r"""
    Return the busses' value of the component's energy transfer.

    Parameters
    ----------
    bus : tespy.connections.bus.Bus
        Bus to calculate energy transfer on.

    Returns
    -------
    bus_value : float
        Value of the energy transfer on the specified bus.

        .. math::

            \dot{E}_\mathrm{bus} = \begin{cases}
            \frac{\dot{E}_\mathrm{component}}{f\left(
            \frac{\dot{E}_\mathrm{bus}}{\dot{E}_\mathrm{bus,ref}}\right)} &
            \text{bus base = 'bus'}\\
            \dot{E}_\mathrm{component} \cdot f\left(
            \frac{\dot{E}_\mathrm{component}}
            {\dot{E}_\mathrm{component,ref}}\right) &
            \text{bus base = 'component'}
            \end{cases}

    Note
    ----
    If the base value of the bus is the bus value itself, a newton
    iteration is used to find the bus value satisfying the corresponding
    equation (case 1).
    """
    b = bus.comps.loc[self]
    comp_val = self.bus_func(b)
    expr = self.calc_bus_expr(bus)
    if b['base'] == 'component':
        # component-based: multiply by the characteristic (efficiency)
        return comp_val * b['char'].evaluate(expr)
    else:
        # bus-based: divide by the characteristic
        return comp_val / b['char'].evaluate(expr)
def initialise_source(self, c, key):
r"""
Return a starting value for pressure and enthalpy at outlet.
Parameters
----------
c : tespy.connections.connection.Connection
Connection to perform initialisation on.
key : str
Fluid property to retrieve.
Returns
-------
val : float
Starting value for pressure/enthalpy in SI units.
.. math::
val = \begin{cases}
0 & \text{key = 'p'}\\
0 & \text{key = 'h'}
\end{cases}
"""
return 0
def initialise_target(self, c, key):
r"""
Return a starting value for pressure and enthalpy at inlet.
Parameters
----------
c : tespy.connections.connection.Connection
Connection to perform initialisation on.
key : str
Fluid property to retrieve.
Returns
-------
val : float
Starting value for pressure/enthalpy in SI units.
.. math::
val = \begin{cases}
0 & \text{key = 'p'}\\
0 & \text{key = 'h'}
\end{cases}
"""
return 0
def propagate_fluid_to_target(self, inconn, start):
r"""
Propagate the fluids towards connection's target in recursion.
Parameters
----------
inconn : tespy.connections.connection.Connection
Connection to initialise.
start : tespy.components.component.Component
This component is the fluid propagation starting point.
The starting component is saved to prevent infinite looping.
"""
conn_idx = self.inl.index(inconn)
outconn = self.outl[conn_idx]
for fluid, x in inconn.fluid.val.items():
if (outconn.fluid.val_set[fluid] is False and
outconn.good_starting_values is False):
outconn.fluid.val[fluid] = x
outconn.target.propagate_fluid_to_target(outconn, start)
def propagate_fluid_to_source(self, outconn, start):
r"""
Propagate the fluids towards connection's source in recursion.
Parameters
----------
outconn : tespy.connections.connection.Connection
Connection to initialise.
start : tespy.components.component.Component
This component is the fluid propagation starting point.
The starting component is saved to prevent infinite looping.
"""
conn_idx = self.outl.index(outconn)
inconn = self.inl[conn_idx]
for fluid, x in outconn.fluid.val.items():
if (inconn.fluid.val_set[fluid] is False and
inconn.good_starting_values is False):
inconn.fluid.val[fluid] = x
inconn.source.propagate_fluid_to_source(inconn, start)
def set_parameters(self, mode, data):
    r"""
    Set or unset design values of component parameters.

    Parameters
    ----------
    mode : str
        Setting component design values for :code:`mode='offdesign'`
        and unsetting them for :code:`mode='design'`.

    data : pandas.core.series.Series
        Series containing the component parameters.
    """
    if mode == 'design' or self.local_design:
        # design calculations invalidate the stored design state
        self.new_design = True

    for key, dc in self.variables.items():
        if isinstance(dc, dc_cp):
            if ((mode == 'offdesign' and not self.local_design) or
                    (mode == 'design' and self.local_offdesign)):
                # offdesign behaviour: take the design value from data
                self.get_attr(key).design = data[key]
            else:
                self.get_attr(key).design = np.nan

def calc_parameters(self):
    r"""Postprocessing parameter calculation."""
    # Base class has nothing to postprocess; subclasses override.
    return

def check_parameter_bounds(self):
    r"""Check parameter value limits and characteristic domains."""
    for p in self.variables.keys():
        data = self.get_attr(p)
        if isinstance(data, dc_cp):
            # numeric properties: warn outside [min_val, max_val] with
            # tolerance err
            if data.val > data.max_val + err:
                msg = (
                    'Invalid value for ' + p + ': ' + p + ' = ' +
                    str(data.val) + ' above maximum value (' +
                    str(data.max_val) + ') at component ' + self.label +
                    '.')
                logging.warning(msg)
            elif data.val < data.min_val - err:
                msg = (
                    'Invalid value for ' + p + ': ' + p + ' = ' +
                    str(data.val) + ' below minimum value (' +
                    str(data.min_val) + ') at component ' + self.label +
                    '.')
                logging.warning(msg)
        elif isinstance(data, dc_cc) and data.is_set:
            # characteristic lines: check the evaluated expression is
            # inside the characteristic's domain
            expr = self.get_char_expr(data.param, **data.char_params)
            data.char_func.get_domain_errors(expr, self.label)
        elif isinstance(data, dc_gcc) and data.is_set:
            # grouped characteristics: check every element's domain
            for char in data.elements:
                char_data = self.get_attr(char)
                expr = self.get_char_expr(
                    char_data.param, **char_data.char_params)
                char_data.char_func.get_domain_errors(expr, self.label)
def initialise_fluids(self):
return
def convergence_check(self):
return
def entropy_balance(self):
r"""Entropy balance calculation method."""
return
def exergy_balance(self, T0):
r"""
Exergy balance calculation method.
Parameters
----------
T0 : float
Ambient temperature T0 / K.
"""
self.E_P = np.nan
self.E_F = np.nan
self.E_bus = np.nan
self.E_D = np.nan
self.epsilon = np.nan
def get_plotting_data(self):
return
def fluid_func(self):
r"""
Calculate the vector of residual values for fluid balance equations.
Returns
-------
residual : list
Vector of residual values for component's fluid balance.
.. math::
0 = x_{fl,in,i} - x_{fl,out,i} \; \forall fl \in
\text{network fluids,} \; \forall i \in \text{inlets}
"""
residual = []
for i in range(self.num_i):
for fluid, x in self.inl[0].fluid.val.items():
residual += [x - self.outl[0].fluid.val[fluid]]
return residual
def fluid_func_doc(self, label):
    r"""
    Get fluid balance equations in LaTeX format.

    Parameters
    ----------
    label : str
        Label for equation.

    Returns
    -------
    latex : str
        LaTeX code of equations applied.
    """
    # render the inlet index set, e.g. "1, 2" or just "1"
    indices = list(range(1, self.num_i + 1))
    if len(indices) > 1:
        indices = ', '.join(str(idx) for idx in indices)
    else:
        indices = str(indices[0])
    latex = (
        r'0=x_{fl\mathrm{,in,}i}-x_{fl\mathrm{,out,}i}\;'
        r'\forall fl \in\text{network fluids,}'
        r'\; \forall i \in [' + indices + r']')
    return generate_latex_eq(self, latex, label)

def fluid_deriv(self):
    r"""
    Calculate partial derivatives for all fluid balance equations.

    Returns
    -------
    deriv : ndarray
        Matrix with partial derivatives for the fluid equations.
    """
    # NOTE(review): column count 2 * num_i assumes num_o == num_i; confirm
    # for components with unequal connector counts.
    deriv = np.zeros((self.fluid_constraints['num_eq'],
                      2 * self.num_i + self.num_vars,
                      self.num_nw_vars))
    for i in range(self.num_i):
        for j in range(self.num_nw_fluids):
            # fluid fractions occupy variable columns 3..3+num_fluids-1
            deriv[i * self.num_nw_fluids + j, i, j + 3] = 1
            deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1
    return deriv
def mass_flow_func(self):
r"""
Calculate the residual value for mass flow balance equation.
Returns
-------
residual : list
Vector with residual value for component's mass flow balance.
.. math::
0 = \dot{m}_{in,i} -\dot{m}_{out,i} \;\forall i\in\text{inlets}
"""
residual = []
for i in range(self.num_i):
residual += [self.inl[i].m.val_SI - self.outl[i].m.val_SI]
return residual
def mass_flow_func_doc(self, label):
    r"""
    Get mass flow equations in LaTeX format.

    Parameters
    ----------
    label : str
        Label for equation.

    Returns
    -------
    latex : str
        LaTeX code of equations applied.
    """
    # render the inlet index set, e.g. "1, 2" or just "1"
    idx_list = list(range(1, self.num_i + 1))
    if len(idx_list) > 1:
        indices = ', '.join(str(idx) for idx in idx_list)
    else:
        indices = str(idx_list[0])
    latex = (
        r'0=\dot{m}_{\mathrm{in,}i}-\dot{m}_{\mathrm{out,}i}'
        r'\; \forall i \in [' + indices + r']')
    return generate_latex_eq(self, latex, label)
def mass_flow_deriv(self):
r"""
Calculate partial derivatives for all mass flow balance equations.
Returns
-------
deriv : ndarray
Matrix with partial derivatives for the mass flow balance
equations.
"""
deriv = np.zeros((
self.num_i,
self.num_i + self.num_o + self.num_vars,
self.num_nw_vars))
for i in range(self.num_i):
deriv[i, i, 0] = 1
for j in range(self.num_o):
deriv[j, j + i + 1, 0] = -1
return deriv
def pressure_equality_func(self):
r"""
Equation for pressure equality.
Returns
-------
residual : float
Residual value of equation.
.. math::
0 = p_{in,i} - p_{out,i} \;\forall i\in\text{inlets}
"""
residual = []
for i in range(self.num_i):
residual += [self.inl[i].p.val_SI - self.outl[i].p.val_SI]
return residual
def pressure_equality_func_doc(self, label):
    r"""
    Equation for pressure equality.

    Parameters
    ----------
    label : str
        Label for equation.

    Returns
    -------
    latex : str
        LaTeX code of equations applied.
    """
    # render the inlet index set, e.g. "1, 2" or just "1"
    idx_list = list(range(1, self.num_i + 1))
    if len(idx_list) > 1:
        indices = ', '.join(str(idx) for idx in idx_list)
    else:
        indices = str(idx_list[0])
    latex = (
        r'0=p_{\mathrm{in,}i}-p_{\mathrm{out,}i}'
        r'\; \forall i \in [' + indices + r']')
    return generate_latex_eq(self, latex, label)
def pressure_equality_deriv(self):
r"""
Calculate partial derivatives for all mass flow balance equations.
Returns
-------
deriv : ndarray
Matrix with partial derivatives for the mass flow balance
equations.
"""
deriv = np.zeros((
self.num_i,
self.num_i + self.num_o + self.num_vars,
self.num_nw_vars))
for i in range(self.num_i):
deriv[i, i, 1] = 1
for j in range(self.num_o):
deriv[j, j + i + 1, 1] = -1
return deriv
def enthalpy_equality_func(self):
r"""
Equation for enthalpy equality.
Returns
-------
residual : list
Residual values of equations.
.. math::
0 = h_{in,i} - h_{out,i} \;\forall i\in\text{inlets}
"""
residual = []
for i in range(self.num_i):
residual += [self.inl[i].h.val_SI - self.outl[i].h.val_SI]
return residual
def enthalpy_equality_func_doc(self, label):
    r"""
    Equation for enthalpy equality.

    Parameters
    ----------
    label : str
        Label for equation.

    Returns
    -------
    latex : str
        LaTeX code of equations applied.
    """
    # render the inlet index set, e.g. "1, 2" or just "1"
    idx_list = list(range(1, self.num_i + 1))
    if len(idx_list) > 1:
        indices = ', '.join(str(idx) for idx in idx_list)
    else:
        indices = str(idx_list[0])
    latex = (
        r'0=h_{\mathrm{in,}i}-h_{\mathrm{out,}i}'
        r'\; \forall i \in [' + indices + r']')
    return generate_latex_eq(self, latex, label)
def enthalpy_equality_deriv(self):
r"""
Calculate partial derivatives for all mass flow balance equations.
Returns
-------
deriv : ndarray
Matrix with partial derivatives for the mass flow balance
equations.
"""
deriv = np.zeros((
self.num_i,
self.num_i + self.num_o + self.num_vars,
self.num_nw_vars))
for i in range(self.num_i):
deriv[i, i, 2] = 1
for j in range(self.num_o):
deriv[j, j + i + 1, 2] = -1
return deriv
def numeric_deriv(self, func, dx, pos, **kwargs):
    r"""
    Calculate partial derivative of the function func to dx.

    Uses a central difference with a step size depending on the variable
    type.

    Parameters
    ----------
    func : function
        Function :math:`f` to calculate the partial derivative for.

    dx : str
        Partial derivative.

    pos : int
        Position of connection regarding to inlets and outlet of the
        component, logic: ['in1', 'in2', ..., 'out1', ...] ->
        0, 1, ..., n, n + 1, ..., n + m

    Returns
    -------
    deriv : float/list
        Partial derivative(s) of the function :math:`f` to variable(s)
        :math:`x`.

        .. math::

            \frac{\partial f}{\partial x} = \frac{f(x + d) - f(x - d)}{2 d}
    """
    if dx == 'fluid':
        # one derivative per network fluid; perturbation is clipped so the
        # mass fraction stays within [0, 1]
        d = 1e-5
        conns = self.inl + self.outl
        deriv = []
        for f in conns[0].fluid.val.keys():
            val = conns[pos].fluid.val[f]
            if conns[pos].fluid.val[f] + d <= 1:
                conns[pos].fluid.val[f] += d
            else:
                conns[pos].fluid.val[f] = 1
            exp = func(**kwargs)
            if conns[pos].fluid.val[f] - 2 * d >= 0:
                conns[pos].fluid.val[f] -= 2 * d
            else:
                conns[pos].fluid.val[f] = 0
            exp -= func(**kwargs)
            # restore the unperturbed value
            conns[pos].fluid.val[f] = val

            deriv += [exp / (2 * d)]

    elif dx in ['m', 'p', 'h']:
        # smaller step for mass flow than for pressure/enthalpy
        if dx == 'm':
            d = 1e-4
        else:
            d = 1e-1
        conns = self.inl + self.outl
        conns[pos].get_attr(dx).val_SI += d
        exp = func(**kwargs)

        conns[pos].get_attr(dx).val_SI -= 2 * d
        exp -= func(**kwargs)
        deriv = exp / (2 * d)

        # restore the unperturbed value
        conns[pos].get_attr(dx).val_SI += d

    else:
        # custom variable: the data container supplies its own step size d
        d = self.get_attr(dx).d
        exp = 0
        self.get_attr(dx).val += d
        exp += func(**kwargs)

        self.get_attr(dx).val -= 2 * d
        exp -= func(**kwargs)
        deriv = exp / (2 * d)

        # restore the unperturbed value
        self.get_attr(dx).val += d

    return deriv
def pr_func(self, pr='', inconn=0, outconn=0):
r"""
Calculate residual value of pressure ratio function.
Parameters
----------
pr : str
Component parameter to evaluate the pr_func on, e.g.
:code:`pr1`.
inconn : int
Connection index of inlet.
outconn : int
Connection index of outlet.
Returns
-------
residual : float
Residual value of function.
.. math::
0 = p_{in} \cdot pr - p_{out}
"""
pr = self.get_attr(pr)
return (self.inl[inconn].p.val_SI * pr.val -
self.outl[outconn].p.val_SI)
def pr_func_doc(self, label, pr='', inconn=0, outconn=0):
    r"""
    Generate the pressure ratio function in LaTeX format.

    Parameters
    ----------
    label : str
        Label for equation.

    pr : str
        Component parameter to evaluate the pr_func on, e.g.
        :code:`pr1`.

    inconn : int
        Connection index of inlet.

    outconn : int
        Connection index of outlet.

    Returns
    -------
    latex : str
        LaTeX code of equation applied.
    """
    latex = (
        r'0=p_\mathrm{in,' + str(inconn + 1) + r'}\cdot ' + pr +
        r' - p_\mathrm{out,' + str(outconn + 1) + r'}'
    )
    return generate_latex_eq(self, latex, label)

def pr_deriv(self, increment_filter, k, pr='', inconn=0, outconn=0):
    r"""
    Calculate the partial derivatives of the pressure ratio function.

    Parameters
    ----------
    increment_filter : ndarray
        Matrix for filtering non-changing variables.

    k : int
        Position of equation in Jacobian matrix.

    pr : str
        Component parameter to evaluate the pr_func on, e.g.
        :code:`pr1`.

    inconn : int
        Connection index of inlet.

    outconn : int
        Connection index of outlet.
    """
    pr = self.get_attr(pr)
    # d/dp_in = pr, d/dp_out = -1 (pressure is variable column 1)
    self.jacobian[k, inconn, 1] = pr.val
    self.jacobian[k, self.num_i + outconn, 1] = -1
    if pr.is_var:
        # if pr is a system variable, add d/d(pr) = p_in in its column
        pos = self.num_i + self.num_o + pr.var_pos
        self.jacobian[k, pos, 0] = self.inl[inconn].p.val_SI
def zeta_func(self, zeta='', inconn=0, outconn=0):
r"""
Calculate residual value of :math:`\zeta`-function.
Parameters
----------
zeta : str
Component parameter to evaluate the zeta_func on, e.g.
:code:`zeta1`.
inconn : int
Connection index of inlet.
outconn : int
Connection index of outlet.
Returns
-------
residual : float
Residual value of function.
.. math::
0 = \begin{cases}
p_{in} - p_{out} & |\dot{m}| < \epsilon \\
\frac{\zeta}{D^4} - \frac{(p_{in} - p_{out}) \cdot \pi^2}
{8 \cdot \dot{m}_{in} \cdot |\dot{m}_{in}| \cdot \frac{v_{in} +
v_{out}}{2}} &
|\dot{m}| > \epsilon
\end{cases}
Note
----
The zeta value is caluclated on the basis of a given pressure loss at
a given flow rate in the design case. As the cross sectional area A
will not change, it is possible to handle the equation in this way:
.. math::
\frac{\zeta}{D^4} = \frac{\Delta p \cdot \pi^2}
{8 \cdot \dot{m}^2 \cdot v}
"""
data = self.get_attr(zeta)
i = self.inl[inconn].get_flow()
o = self.outl[outconn].get_flow()
if abs(i[0]) < 1e-4:
return i[1] - o[1]
else:
v_i = v_mix_ph(i, T0=self.inl[inconn].T.val_SI)
v_o = v_mix_ph(o, T0=self.outl[outconn].T.val_SI)
return (data.val - (i[1] - o[1]) * np.pi ** 2 /
(8 * abs(i[0]) * i[0] * (v_i + v_o) / 2))
def zeta_func_doc(self, label, zeta='', inconn=0, outconn=0):
    r"""
    Generate the LaTeX documentation of the :math:`\zeta`-function.

    Parameters
    ----------
    label : str
        Label of the equation in the model documentation.

    zeta : str
        Component parameter to evaluate the zeta_func on, e.g.
        :code:`zeta1`.

    inconn : int
        Connection index of inlet.

    outconn : int
        Connection index of outlet.

    Returns
    -------
    latex : str
        LaTeX code of the equation, as produced by generate_latex_eq.
    """
    # Subscripts for the selected inlet/outlet (1-based in the docs).
    inl = r'_\mathrm{in,' + str(inconn + 1) + r'}'
    outl = r'_\mathrm{out,' + str(outconn + 1) + r'}'
    # NOTE: where a '+' is missing at a line break below, adjacent string
    # literals concatenate implicitly; the resulting LaTeX is intentional.
    latex = (
        r'0 = \begin{cases}' + '\n' +
        r'p' + inl + r'- p' + outl + r' & |\dot{m}' + inl +
        r'| < \unitfrac[0.0001]{kg}{s} \\' + '\n' +
        r'\frac{\zeta}{D^4}-\frac{(p' + inl + r'-p' + outl + r')'
        r'\cdot\pi^2}{8\cdot\dot{m}' + inl + r'\cdot|\dot{m}' + inl +
        r'|\cdot\frac{v' + inl + r' + v' + outl + r'}{2}}' +
        r'& |\dot{m}' + inl + r'| \geq \unitfrac[0.0001]{kg}{s}' + '\n'
        r'\end{cases}'
    )
    return generate_latex_eq(self, latex, label)
def zeta_deriv(self, increment_filter, k, zeta='', inconn=0, outconn=0):
    r"""
    Calculate partial derivatives of zeta function.

    Parameters
    ----------
    increment_filter : ndarray
        Matrix for filtering non-changing variables.

    k : int
        Position of equation in Jacobian matrix.

    zeta : str
        Component parameter to evaluate the zeta_func on, e.g.
        :code:`zeta1`.

    inconn : int
        Connection index of inlet.

    outconn : int
        Connection index of outlet.
    """
    data = self.get_attr(zeta)
    f = self.zeta_func
    outpos = self.num_i + outconn
    # inlet derivatives w.r.t. mass flow (col 0), pressure (col 1) and
    # enthalpy (col 2)
    if not increment_filter[inconn, 0]:
        self.jacobian[k, inconn, 0] = self.numeric_deriv(
            f, 'm', inconn, zeta=zeta, inconn=inconn, outconn=outconn)
    # BUGFIX: the pressure derivative must be guarded by the pressure
    # column (1) of the increment filter; the original duplicated the
    # enthalpy guard (column 2) here.
    if not increment_filter[inconn, 1]:
        self.jacobian[k, inconn, 1] = self.numeric_deriv(
            f, 'p', inconn, zeta=zeta, inconn=inconn, outconn=outconn)
    if not increment_filter[inconn, 2]:
        self.jacobian[k, inconn, 2] = self.numeric_deriv(
            f, 'h', inconn, zeta=zeta, inconn=inconn, outconn=outconn)
    # outlet derivatives w.r.t. pressure and enthalpy
    if not increment_filter[outpos, 1]:
        self.jacobian[k, outpos, 1] = self.numeric_deriv(
            f, 'p', outpos, zeta=zeta, inconn=inconn, outconn=outconn)
    if not increment_filter[outpos, 2]:
        self.jacobian[k, outpos, 2] = self.numeric_deriv(
            f, 'h', outpos, zeta=zeta, inconn=inconn, outconn=outconn)
    # custom variable zeta
    if data.is_var:
        pos = self.num_i + self.num_o + data.var_pos
        self.jacobian[k, pos, 0] = self.numeric_deriv(
            f, zeta, 2, zeta=zeta, inconn=inconn, outconn=outconn)
| 1.859375 | 2 |
ossdbtoolsservice/language/contracts/status_changed_notification.py | DaeunYim/pgtoolsservice | 33 | 12758492 | <gh_stars>10-100
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""This module holds contracts for the status change notification"""
from ossdbtoolsservice.serialization import Serializable
class StatusChangeParams(Serializable):
    """Parameters of the 'textDocument/statusChanged' notification."""

    def __init__(self, owner_uri=None, status=None):
        # URI identifying the document whose status changed.
        self.owner_uri: str = owner_uri
        # Status value reported for that document.
        self.status: str = status
# JSON-RPC method name used for the status change notification.
STATUS_CHANGE_NOTIFICATION = 'textDocument/statusChanged'
| 2.40625 | 2 |
itests/fe/audits_test.py | TimYagan/merou | 0 | 12758493 | <reponame>TimYagan/merou<gh_stars>0
from datetime import datetime, timedelta
from itests.fixtures import async_server # noqa: F401
from itests.pages.audits import AuditsCreatePage
from itests.pages.groups import GroupViewPage
from plugins import group_ownership_policy
from tests.fixtures import ( # noqa: F401
fe_app as app,
graph,
groups,
permissions,
service_accounts,
session,
standard_graph,
users,
)
from tests.url_util import url
from tests.util import add_member
def test_remove_last_owner_via_audit(async_server, browser, users, groups, session):  # noqa: F811
    """Removing the last owner of a group through an audit must be rejected."""
    expiration = datetime.utcnow() + timedelta(1)
    add_member(groups["auditors"], users["<EMAIL>"], role="owner")
    add_member(
        groups["audited-team"], users["<EMAIL>"], role="owner", expiration=expiration
    )
    session.commit()

    # Open a new audit whose end date covers the membership expiration.
    browser.get(url(async_server, "/audits/create"))
    create_page = AuditsCreatePage(browser)
    create_page.set_end_date(expiration.strftime("%m/%d/%Y"))
    create_page.submit()

    # Attempt to remove the only owner via the group's audit modal.
    browser.get(url(async_server, "/groups/audited-team"))
    view_page = GroupViewPage(browser)
    modal = view_page.get_audit_modal()
    modal.find_member_row("<EMAIL>").set_audit_status("remove")
    modal.confirm()

    # The removal is rejected and the ownership-policy message is shown.
    assert view_page.current_url.endswith("/groups/audited-team")
    assert view_page.has_text(group_ownership_policy.EXCEPTION_MESSAGE)
| 1.84375 | 2 |
02 Sequence Types/rangetype.py | Himanshu44626748/Learn-Python | 2 | 12758494 | r = range(5) # Counts from 0 to 4
for i in r:
print(i)
r = range(1,6) # Counts from 1 to 5
for i in r:
print(i)
# Step Value
r = range(1,15,3) # Counts from 1 to 15 with a gap of '3', thereby, counting till '13' only as 16 is not in the range
for i in r:
print(i) | 4.0625 | 4 |
EM/EM.py | AutuanLiu/Machine-Learning-on-docker | 11 | 12758495 | """EM 算法的实现
"""
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
isdebug = True
# 指定k个高斯分布参数,这里指定k=2。注意2个高斯分布具有相同均方差Sigma,均值分别为Mu1,Mu2。
def init_data(Sigma, Mu1, Mu2, k, N):
    """Draw N observations from a 50/50 mixture of two Gaussians.

    Fills the module-level X (observations), Mu (random initial means)
    and Expectations (responsibility matrix) in place.
    """
    global X
    global Mu
    global Expectations
    X = np.zeros((1, N))
    Mu = np.random.random(k)
    Expectations = np.zeros((N, k))
    for col in range(N):
        # Choose one of the two components with equal probability, then
        # sample from it (same RNG call order as a plain if/else).
        component_mu = Mu1 if np.random.random(1) > 0.5 else Mu2
        X[0, col] = np.random.normal(component_mu, Sigma)
    if isdebug:
        print("***********")
        print("初始观测数据X:")
        print(X)
# EM算法:步骤1,计算E[zij]
def e_step(Sigma, k, N):
    """E-step: compute the responsibilities E[z_ij] for every sample."""
    global Expectations
    global Mu
    global X
    for i in range(N):
        # Unnormalised Gaussian weights for sample i w.r.t. each mean.
        weights = [
            math.exp((-1 / (2 * (float(Sigma**2)))) * (float(X[0, i] - Mu[j]))**2)
            for j in range(k)
        ]
        total = sum(weights)
        for j in range(k):
            Expectations[i, j] = weights[j] / total
    if isdebug:
        print("***********")
        print("隐藏变量E(Z):")
        print(Expectations)
# EM算法:步骤2,求最大化E[zij]的参数Mu
def m_step(k, N):
    """M-step: update each mean as a responsibility-weighted average."""
    global Expectations
    global X
    for j in range(k):
        # Sums accumulate in the same left-to-right order as the original
        # explicit loop, so the floating point results are identical.
        weighted_sum = sum(Expectations[i, j] * X[0, i] for i in range(N))
        weight_total = sum(Expectations[i, j] for i in range(N))
        Mu[j] = weighted_sum / weight_total
# 算法迭代iter_num次,或达到精度Epsilon停止迭代
def run(Sigma, Mu1, Mu2, k, N, iter_num, Epsilon):
    """Iterate EM until the change in Mu drops below Epsilon or iter_num is hit."""
    init_data(Sigma, Mu1, Mu2, k, N)
    print("初始<u1,u2>:", Mu)
    for step in range(iter_num):
        previous_mu = copy.deepcopy(Mu)
        e_step(Sigma, k, N)
        m_step(k, N)
        print(step, Mu)
        # Convergence test: total absolute change of the means.
        if sum(abs(Mu - previous_mu)) < Epsilon:
            break
if __name__ == '__main__':
    sigma = 6  # shared standard deviation of both Gaussians
    mu1 = 40  # mean of the first Gaussian used to generate samples
    mu2 = 20  # mean of the second Gaussian used to generate samples
    k = 2  # number of Gaussian components
    N = 1000  # number of samples
    iter_num = 1000  # maximum number of EM iterations
    epsilon = 0.0001  # stop when the total change in Mu is below this
    run(sigma, mu1, mu2, k, N, iter_num, epsilon)
    # Visualise the generated observations as a 50-bin histogram.
    plt.hist(X[0, :], 50)
    plt.show()
stylobate_mgmt/commands/stop.py | digitaltembo/stylobate-mgmt | 0 | 12758496 | import os
from .utils import docker, Command
class Stop(Command):
    """Stop a currently running docker container.

    Usage::

        stylo stop --docker-dev/-d
                   --docker-prod/-D
                   --docker-ssl/-s
    """

    name = 'stop'
    description = "Stops a currently running background process"

    def add_args(self, parser):
        """Register the three docker-target flags."""
        flag_specs = (
            ('--docker-dev', '-d', 'Stops the dev docker container'),
            ('--docker-prod', '-D', 'Stops the production docker container'),
            ('--docker-ssl', '-s', 'Stops the ssl docker container'),
        )
        for long_flag, short_flag, help_text in flag_specs:
            parser.add_argument(
                long_flag, short_flag,
                action='store_true',
                help=help_text
            )

    def main(self, args):
        """Resolve the requested docker environment and stop it."""
        if not any((args.docker_dev, args.docker_prod, args.docker_ssl)):
            self.print('One of --docker-dev, --docker-prod, or --docker-ssl must be specified')
            return
        self.stop_docker(docker.get_env(args))

    def stop_docker(self, docker_env):
        """Bring the docker-compose stack for the given environment down."""
        self.execute('docker-compose -f {} down'.format(docker_env))
| 2.65625 | 3 |
qaz/application/update.py | samueljsb/qaz | 0 | 12758497 | from qaz import settings
from qaz.managers import git, shell
def update_qaz() -> None:
    """
    Update QAZ itself.

    Pulls the latest version of the QAZ repository, then installs the
    Python dependencies for this tool into the project's virtualenv.
    """
    root_dir = settings.get_root_dir()
    git.pull(root_dir)
    # poetry must target the project's own virtualenv, not the caller's.
    venv_env = dict(VIRTUAL_ENV=str(root_dir / ".venv"))
    shell.run(
        "poetry install --no-dev --remove-untracked",
        cwd=root_dir,
        env=venv_env,
    )
| 1.625 | 2 |
paratransit/api/migrations/0012_auto_20170511_2201.py | NiJeLorg/paratransit_api | 0 | 12758498 | <reponame>NiJeLorg/paratransit_api<filename>paratransit/api/migrations/0012_auto_20170511_2201.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-11 22:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration renaming location coordinate fields.

    Renames ``p_lat``/``p_lng`` on ``dropoff_locations`` to
    ``d_lat``/``d_lng`` and the ``point`` field on both location models
    to ``the_geom``.
    """

    dependencies = [
        ('api', '0011_dropoff_locations_pickup_locations'),
    ]

    operations = [
        migrations.RenameField(
            model_name='dropoff_locations',
            old_name='p_lat',
            new_name='d_lat',
        ),
        migrations.RenameField(
            model_name='dropoff_locations',
            old_name='p_lng',
            new_name='d_lng',
        ),
        migrations.RenameField(
            model_name='dropoff_locations',
            old_name='point',
            new_name='the_geom',
        ),
        migrations.RenameField(
            model_name='pickup_locations',
            old_name='point',
            new_name='the_geom',
        ),
    ]
| 1.679688 | 2 |
tests/dataset/test_features.py | sunlanchang/Automatic-Speech-Recognition-with-Vue | 0 | 12758499 | import os
import h5py
import pytest
import numpy as np
import pandas as pd
import automatic_speech_recognition as asr
@pytest.fixture
def dataset() -> asr.dataset.Features:
    """Create a Features dataset backed by a freshly generated HDF5 store.

    Writes ``test.h5`` containing 10 random (20, 10) feature arrays plus a
    ``references`` table of paths and transcripts, then opens it with a
    batch size of 3.

    NOTE(review): cleanup of ``test.h5`` currently happens at the end of
    ``test_get_batch``; a fixture-level teardown would be more robust.
    """
    file_path = 'test.h5'
    reference = pd.DataFrame({
        'path': [f'dataset/{i}' for i in range(10)],
        'transcript': [f'transcript-{i}' for i in range(10)],
    })
    # First pass: raw h5py to store one feature matrix per path.
    with h5py.File(file_path, 'w') as store:
        for path in reference.path:
            store[path] = np.random.random([20, 10])
    # Second pass: pandas HDFStore to append the references table.
    with pd.HDFStore(file_path, mode='r+') as store:
        store['references'] = reference
    return asr.dataset.Features.from_hdf(file_path, batch_size=3)
def test_get_batch(dataset):
    """Check that a batch yields correctly aligned audio and transcripts."""
    try:
        batch_audio, transcripts = dataset.get_batch(index=1)
        # Batch size is 3, so batch index 1 covers samples 3, 4 and 5.
        a, b, c = transcripts
        assert b == 'transcript-4'
        a, b, c = batch_audio
        assert b.shape == (20, 10)
    finally:
        # Remove the store even when an assertion fails, so that repeated
        # test runs always start from a clean slate (the original only
        # removed it on success).
        os.remove('test.h5')
| 2.359375 | 2 |
python/paddle/fluid/tests/unittests/test_fleet_elastic_collective.py | L-Net-1992/Paddle | 0 | 12758500 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import time
import json
import unittest
import argparse
import tempfile
import traceback
from warnings import catch_warnings
from paddle.distributed.fleet.elastic.collective import CollectiveLauncher
from paddle.distributed.fleet.launch import launch_collective
# Minimal script content written to a temporary file in setUp(); the
# launcher tests execute it as the training script.
fake_python_code = """
print("test")
"""
class TestCollectiveLauncher(unittest.TestCase):
    """Smoke tests for launching and stopping an elastic collective job."""

    def setUp(self):
        # Write a trivial training script for the launcher to execute.
        self.temp_dir = tempfile.TemporaryDirectory()
        self.code_path = os.path.join(self.temp_dir.name,
                                      "fake_python_for_elastic.py")
        with open(self.code_path, "w") as f:
            f.write(fake_python_code)

    def tearDown(self):
        self.temp_dir.cleanup()

    def _make_args(self):
        """Build the launcher argument namespace shared by all tests.

        The two original tests duplicated this identical ``Argument``
        class; it is factored out here.
        """

        class Argument:
            elastic_server = "127.0.0.1:2379"
            job_id = "test_job_id_123"
            np = "1"
            gpus = "0"
            nproc_per_node = 1
            host = None
            curr_host = None
            ips = "127.0.0.1"
            scale = None
            force = None
            backend = 'gloo'
            enable_auto_mapping = False
            run_mode = "cpuonly"
            servers = None
            rank_mapping_path = None
            training_script = self.code_path
            training_script_args = ["--use_amp false"]
            log_dir = None

        return Argument()

    def test_launch(self):
        args = self._make_args()
        launch = CollectiveLauncher(args)

        try:
            args.backend = "gloo"
            launch.launch()
            launch.stop()
        except Exception:
            # Best effort: launching may fail in constrained environments.
            pass

        try:
            args.backend = "gloo"
            launch_collective(args)
        except Exception:
            pass

    def test_stop(self):
        args = self._make_args()

        try:
            launch = CollectiveLauncher(args)
            launch.tmp_dir = tempfile.mkdtemp()
            launch.stop()
        except Exception:
            pass
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 1.9375 | 2 |
control/test/hexitec/package/test_rdma.py | stfc-aeg/hexitec-detector | 1 | 12758501 | <reponame>stfc-aeg/hexitec-detector
"""Test Cases for the Hexitec RdmaUDP in hexitec.
<NAME>, STFC Detector Systems Software Group
"""
from socket import error as socket_error
from hexitec.RdmaUDP import RdmaUDP
import pytest
import struct
import sys
if sys.version_info[0] == 3: # pragma: no cover
from unittest.mock import Mock, patch
else: # pragma: no cover
from mock import Mock, patch
class RdmaUDPTestFixture(object):
    """Test fixture class."""

    def __init__(self):
        """Initialise object."""
        # Endpoint configuration for the RDMA UDP link under test.
        self.master_ip = "127.0.0.1"
        self.master_port = 8888
        self.target_ip = "127.0.0.2"
        # Address used by the failure tests (expected to be unbindable).
        self.fake_ip = "172.16.17.32"
        self.target_port = 8080
        self.UDPMTU = 8000
        # Patch the socket module so constructing RdmaUDP opens no real sockets.
        with patch("hexitec.RdmaUDP.socket"):
            self.rdma = RdmaUDP(self.master_ip, self.master_port,
                                self.master_ip, self.master_port,
                                self.target_ip, self.target_port,
                                self.target_ip, self.target_port,
                                UDPMTU=self.UDPMTU)
        # Keep the originals so bind() calls can still be asserted on them.
        self.tx_old_sock = self.rdma.txsocket
        self.rx_old_sock = self.rdma.rxsocket
        # Replace both sockets with mocks for send/recv assertions.
        self.tx_socket = Mock()
        self.rx_socket = Mock()
        self.return_data = 256
        # Canned RDMA response packet; the fourth field carries the data
        # value that read() is expected to return.
        return_struct = struct.pack('=IIIIQQQQQ', 5, 7, 5, self.return_data, 0, 0, 0, 0, 10)
        self.rx_socket.recv = Mock(return_value=return_struct)
        self.rdma.txsocket = self.tx_socket
        self.rdma.rxsocket = self.rx_socket
        self.rdma.ack = True
        self.rdma.setDebug()
@pytest.fixture
def test_rdma():
    """Yield a fresh RdmaUDP test fixture for each test."""
    fixture = RdmaUDPTestFixture()
    yield fixture
class TestRdmaUDP():
    """Test suit."""

    def test_init(self, test_rdma):
        """Tests that the sockets of the RDMA were bound correctly."""
        # bind() happened before the sockets were swapped for mocks, so
        # the calls are asserted on the saved original socket objects.
        test_rdma.rx_old_sock.bind.assert_called_with(
            (test_rdma.master_ip, test_rdma.master_port)
        )
        test_rdma.tx_old_sock.bind.assert_called_with(
            (test_rdma.master_ip, test_rdma.master_port)
        )

    def test_connect_tx_socket_fails(self, test_rdma):
        """Test unavailable IP will throw socket error."""
        with patch('hexitec.HexitecFem.RdmaUDP') as rdma_mock:
            rdma_mock.side_effect = socket_error()
            with pytest.raises(socket_error) as exc_info:
                # fake_ip cannot be bound locally, so the transmit socket
                # constructor is expected to raise.
                self.rdma = RdmaUDP(test_rdma.fake_ip, test_rdma.master_port,
                                    test_rdma.target_ip, test_rdma.master_port,
                                    test_rdma.target_ip, test_rdma.target_port,
                                    test_rdma.target_ip, test_rdma.target_port,
                                    UDPMTU=test_rdma.UDPMTU)
            error_message = "[Errno 99] Cannot assign requested address"
            e = "Transmit socket IP:Port {}:8888 {}".format(test_rdma.fake_ip, error_message)
            assert exc_info.type is socket_error
            assert exc_info.value.args[0] == e

    def test_connect_rx_socket_fails(self, test_rdma):
        """Test unavailable IP will throw socket error."""
        with patch('hexitec.HexitecFem.RdmaUDP') as rdma_mock:
            rdma_mock.side_effect = socket_error()
            with pytest.raises(socket_error) as exc_info:
                # Same as above, but the unbindable address is passed as
                # the receive socket's IP.
                self.rdma = RdmaUDP(test_rdma.master_ip, test_rdma.master_port,
                                    test_rdma.fake_ip, test_rdma.master_port,
                                    test_rdma.target_ip, test_rdma.target_port,
                                    test_rdma.target_ip, test_rdma.target_port,
                                    UDPMTU=test_rdma.UDPMTU)
            error_message = "[Errno 99] Cannot assign requested address"
            e = "Receive socket IP:Port {}:8888 {}".format(test_rdma.fake_ip, error_message)
            assert exc_info.type is socket_error
            assert exc_info.value.args[0] == e

    def test_read(self, test_rdma):
        """Test that the read method calls the relevant socket methods correctly."""
        test_address = 256
        # Expected request packet for a read (command code 3) at the address.
        read_command = struct.pack('=BBBBIQBBBBIQQQQQ', 1, 0, 0, 3, test_address,
                                   0, 9, 0, 0, 255, 0, 0, 0, 0, 0, 0)
        data = test_rdma.rdma.read(test_address)
        test_rdma.tx_socket.sendto.assert_called_with(read_command,
                                                      (test_rdma.target_ip, test_rdma.target_port))
        test_rdma.rx_socket.recv.assert_called_with(test_rdma.UDPMTU)
        # The canned response in the fixture carries return_data (256).
        assert data == test_rdma.return_data

    def test_write(self, test_rdma):
        """Test that the write method calls the relevant socket methods correctly."""
        test_address = 256
        test_data = 1024
        # Expected request packet for a write (command code 2) with the data.
        write_command = struct.pack('=BBBBIQBBBBIQQQQQ', 1, 0, 0, 2, test_address,
                                    test_data, 9, 0, 0, 255, 0, 0, 0, 0, 0, 0)
        test_rdma.rdma.write(test_address, test_data)
        test_rdma.tx_socket.sendto.assert_called_with(write_command,
                                                      (test_rdma.target_ip, test_rdma.target_port))
        assert test_rdma.rdma.ack is True

    def test_close(self, test_rdma):
        """Test sockets closed."""
        test_rdma.rdma.close()
        # TODO: rdma Mock object, amend to check sockets shut?
        # assert test_rdma.rdma.rxsocket._closed is True
        # assert test_rdma.rdma.txsocket._closed is True
| 2.15625 | 2 |
simplified/analyze_all.py | iorodeo/photogate_test | 0 | 12758502 | #!/usr/bin/env python
import sys
import scipy
import pylab
from analyze_trial import get_period
GRAV_CONST = 9.81  # gravitational acceleration in m/s^2

# Pendulum data files are passed as command line arguments.
data_files = sys.argv[1:]

period_vals = []
length_vals = []

# Read data file and compute periods
for file_name in data_files:
    print 'analyzing: ', file_name
    pend_len, period = get_period(file_name)
    print ' length: ', pend_len
    print ' period: ', period
    period_vals.append(period)
    length_vals.append(pend_len)

period_vals = scipy.array(period_vals)
length_vals = scipy.array(length_vals)

# Theoretical model T = 2*pi*sqrt(L/g) evaluated across the measured
# length range.
length_max = length_vals.max()
length_min = length_vals.min()
length_model = scipy.linspace(length_min, length_max, 100)
period_model = 2.0*scipy.pi*scipy.sqrt(length_model/GRAV_CONST)

# Plot the measured periods (red dots) against the model curve (blue).
pylab.plot(length_model, period_model, 'b')
pylab.plot(length_vals, period_vals, 'or')
pylab.xlabel('length (m)')
pylab.ylabel('period (s)')
pylab.grid('on')
pylab.show()
| 2.609375 | 3 |
HTTPServer.py | okumusg/python | 0 | 12758503 | <filename>HTTPServer.py
'This is a simple http server'
#!/usr/bin/python
import SimpleHTTPServer
import SocketServer
port = 10000 # Server port
handler = SimpleHTTPServer.SimpleHTTPRequestHandler # Creating HTTP handler instance
httpd = SocketServer.TCPServer(("",port),handler) # Creating a TCP Server with HTTP handler
print "Http Server serving at ", port
# Serve requests until the process is interrupted (blocks forever).
httpd.serve_forever()
| 3.1875 | 3 |
equip/analysis/graph/io.py | neuroo/equip | 102 | 12758504 | <reponame>neuroo/equip
# -*- coding: utf-8 -*-
"""
equip.analysis.graph.io
~~~~~~~~~~~~~~~~~~~~~~~
Outputs the graph structures
:copyright: (c) 2014 by <NAME> (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
from .graphs import DiGraph, Tree
# Default Graphviz attributes injected into every generated digraph:
# top-down layout, small Verdana fonts and light-grey filled box nodes.
DOT_STYLE = """
rankdir=TD; ordering=out;
graph[fontsize=10 fontname="Verdana"];
color="#efefef";
node[shape=box style=filled fontsize=8 fontname="Verdana" fillcolor="#efefef"];
edge[fontsize=8 fontname="Verdana"];
"""
class DotConverter(object):
    """Render a DiGraph or Tree as Graphviz dot source.

    Use the static :meth:`process` helper; the dot source accumulates
    in ``self.buffer``.
    """

    def __init__(self, graph):
        self.g = graph
        self.buffer = ''
        # Maps graph nodes to their dot identifiers ('node_<gid>').
        self.node_ids = {}

    @staticmethod
    def process(graph):
        """Convert ``graph`` and return the generated dot source."""
        converter = DotConverter(graph)
        converter.run()
        return converter.buffer

    def run(self):
        """Walk the graph and emit the complete 'digraph G { ... }' document."""
        self.buffer += 'digraph G {'
        self.buffer += DOT_STYLE

        if isinstance(self.g, DiGraph):
            for edge in self.g.edges:
                self.add_edge(edge)
        elif isinstance(self.g, Tree):
            # Depth-first traversal, emitting a parent -> child edge for
            # every non-None child.
            root = self.g.root
            worklist = [root]
            while worklist:
                current = worklist.pop(0)
                if current.has_children():
                    num_children = current.num_children()
                    i = 0
                    while i < num_children:
                        child = current.children[i]
                        if child is None:
                            i += 1
                            continue
                        self.add_tree_edge(current, child)
                        worklist.insert(0, child)
                        i += 1
                else:
                    # Leaf node: declare it even though it has no outgoing
                    # edges. (The returned id was previously bound to an
                    # unused local; only the side effect matters.)
                    self.get_node_id(current)

        self.buffer += '}\n'

    def add_edge(self, edge):
        """Emit one directed edge, labelled with its kind/data when present."""
        labels = ''
        if edge.kind is not None:
            data = '' if edge.data is None else str(edge.data)
            labels = '[label="%s - %s"]' % (edge.kind, data)
        nid1 = self.get_node_id(edge.source)
        nid2 = self.get_node_id(edge.dest)
        self.buffer += '%s -> %s %s;\n' % (nid1, nid2, labels)

    def add_tree_edge(self, node1, node2):
        """Emit one unlabelled parent -> child edge."""
        nid1 = self.get_node_id(node1)
        nid2 = self.get_node_id(node2)
        self.buffer += '%s -> %s;\n' % (nid1, nid2)

    def get_node_id(self, node):
        """Return the dot id for ``node``, declaring the node on first use."""
        if node not in self.node_ids:
            self.node_ids[node] = 'node_%d' % node.gid
            self.add_node(node, self.node_ids[node])
        return self.node_ids[node]

    def add_node(self, node, node_id):
        """Emit the node declaration, labelled when the node carries data."""
        label = ''
        if node.data is not None:
            node_kind = ('%s - ' % node.kind) if node.kind is not None else ''
            label = '[label="Node%d - %s%s"]' % (node.gid, node_kind, node.data)
        self.buffer += '%s %s;\n' % (node_id, label)
| 2.359375 | 2 |
jumpscale/data/encryption/exceptions.py | zaibon/js-ng | 2 | 12758505 | from jumpscale.core.exceptions import JSException
class FailedChecksumError(JSException):
    """Raised when a checksum verification fails."""

    pass
| 1.171875 | 1 |
www/www/settings.py | mattvenn/cursivedata | 1 | 12758506 | <reponame>mattvenn/cursivedata
# Django settings for testsite project.
from os import path
import sys
import socket
hostname = socket.gethostname()
# Try and import pycairo or fallback to cairocffi and install as cairo
try:
    import cairo
except ImportError:
    # cairocffi is a drop-in replacement; registering it makes any later
    # `import cairo` statements resolve to cairocffi.
    import cairocffi
    cairocffi.install_as_pycairo()
from django.core.urlresolvers import reverse_lazy
PROJECT_ROOT = path.dirname(path.dirname(__file__))
LOGIN_REDIRECT_URL = '/'
EMAIL_HOST = 'localhost'
# debug on dev machines: the production host runs with DEBUG disabled,
# any other hostname is treated as a development machine.
if hostname == 'vennzaa1.miniserver.com':
    DEBUG = False
else:
    DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cursivedata',
'USER': 'cursivedata',
'HOST': 'localhost',
'PASSWORD': '<PASSWORD>',
},
'sqllite': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_ROOT, 'db', 'www.sqlite'),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Greenwich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
import warnings
warnings.filterwarnings(
'error', r"DateTimeField received a naive datetime",
RuntimeWarning, r'django\.db\.models\.fields')
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/polarsite/polargraphenergymonitor/testsite/media/admin/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/media/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'i@))&55xb)_981^88xtxtd6bds+bn_be&<KEY>'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages"
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'www.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'www.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
path.join(PROJECT_ROOT, 'www', 'templates'),
"cursivedata/templates"
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third party libraries
'tastypie',
'django_nose',
'south',
# Our apps
'landing',
'cursivedata',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# In development log to both console and file at DEBUG level; in
# production log to the file only, at INFO level.
if DEBUG:
    default_logger = {
        'handlers': ['console','file'],
        'level': 'DEBUG',
    }
else:
    default_logger = {
        'handlers': ['file'],
        'level': 'INFO',
    }
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s] [%(levelname)s] %(process)d %(module)s : %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'log/info.log',
'formatter': 'verbose',
},
},
'loggers': {
'endpoint': default_logger,
'api': default_logger,
'graphics': default_logger,
'data': default_logger,
'generator': default_logger,
'views': default_logger,
'pipeline': default_logger,
},
}
LOGIN_URL = reverse_lazy('login')
LOGOUT_URL = reverse_lazy('logout')
| 2.296875 | 2 |
sentiment/absa/aspect_semeval.py | uZeroJ/nlps | 1 | 12758507 | """
This is an implementation of paper
"Attention-based LSTM for Aspect-level Sentiment Classification" with Keras.
Based on dataset from "SemEval 2014 Task 4".
"""
import os
from time import time
# TODO, Here we need logger!
import numpy as np
from lxml import etree
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, LSTM, Dense
from keras.layers import RepeatVector, Dot, Concatenate, Reshape
from keras.activations import softmax
from keras.models import Model, load_model
from keras import regularizers, initializers, optimizers
from keras.layers import Lambda
import keras.backend as K
# Dictionary keys for the per-sentence records produced by extract_data().
TEXT_KEY = 'text'
TERM_KEY = 'aspect_terms'
CATEGORY_KEY = 'aspect_categories'
# Tuple positions inside combined (text, aspect, polarity) examples.
I_TEXT, I_ASPECT, I_POLARITY = 0, 1, 2

# Correspond to settings in paper.
EMBEDDING_DIM = 300
ASPECT_EMBEDDING_DIM = 300
HIDDEN_LAYER_SIZE = 300

# Hyper-parameters for training.
L2_REGULARIZATION = 0.001
MOMENTUM = 0.9
LEARNING_RATE = 0.001
MINI_BATCH_SIZE = 25
RANDOM_UNIFORM = .01

# Class indices for the four sentiment polarity labels.
POLARITY_TO_INDEX = {
    'positive': 0,
    'negative': 1,
    'neutral': 2,
    'conflict': 3
}
def extract_data(data_file='Restaurants_Train_v2.xml'):
    """
    Extract training data from an XML file provided by 'SemEval 2014 Task 4'.

    :param data_file: XML file that contains training data.
    :return: A list of dictionaries of training data with TEXT_KEY,
        'aspect terms' and 'aspect categories'.
    """
    def parse_sentence(sent):
        """
        Collect the text, aspect-term values and aspect-category values
        of one 'sentence' XML node.

        :param sent: a single xml node of sentence.
        :type: _Element
        :return: A dictionary of contents.
        """
        # A text child node is assumed to always be present.
        content = {TEXT_KEY: sent.xpath(TEXT_KEY)[0].text}

        terms = sent.xpath('aspectTerms')
        if terms:
            # Only one 'aspectTerms' element exists; keep the first two
            # values of each child, i.e. (term, polarity).
            content[TERM_KEY] = [term.values()[:2]
                                 for term in terms[0].iterchildren()]

        categories = sent.xpath('aspectCategories')
        if categories:
            content[CATEGORY_KEY] = [category.values()
                                     for category in categories[0].iterchildren()]
        return content

    sents_root = etree.parse(data_file).getroot()
    return [parse_sentence(sent) for sent in sents_root.iterchildren()]
def check_absent(data):
    """
    Check for sentences missing 'aspect terms' or 'aspect categories', flag
    whether any sentence misses both, and track the maximum sentence length
    (in characters).

    :param data: dataset with all contents.
    :type: list of dictionary.
    :return: (term_absent_indices, term_absent_cnt,
              category_absent_indices, category_absent_cnt,
              exist_both_missing, max_len) — indices and counts of the
              sentences missing terms/categories, a flag for sentences
              missing both, and the longest sentence length.
    :type: tuple of (list, int, list, int, bool, int)
    """
    exist_both_missing = False
    term_absent_indices = []
    term_absent_cnt = 0
    category_absent_indices = []
    category_absent_cnt = 0
    max_len = 0
    for idx, sent in enumerate(data):
        # length measured in characters of the raw sentence text
        max_len = max(len(sent[TEXT_KEY]), max_len)
        term_absent = TERM_KEY not in sent.keys()
        category_absent = CATEGORY_KEY not in sent.keys()
        if term_absent and category_absent:
            exist_both_missing = True
        if term_absent:
            term_absent_indices.append(idx)
            term_absent_cnt += 1
        if category_absent:
            category_absent_indices.append(idx)
            category_absent_cnt += 1
    return (term_absent_indices, term_absent_cnt,
            category_absent_indices, category_absent_cnt,
            exist_both_missing, max_len)
def combine_data(data, mess=True, replace_space=True, replace_space_char='_'):
    """
    Pair every sentence with each of its aspects.

    With ``mess=True`` the aspect-term and aspect-category examples are
    merged into one list, returned together with the overall maximum
    sentence length. With ``mess=False`` the two example lists are
    returned separately, each paired with its own maximum sentence
    length.

    Multi-word aspects may contain spaces (and other characters such as
    '-'); when ``replace_space`` is set, spaces are replaced with
    ``replace_space_char`` so each aspect stays a single token.

    :param data: sentences with TEXT_KEY and optional aspect lists.
    :return: (examples, max_len) when ``mess`` is True, otherwise
        ((term_examples, term_max_len), (category_examples, category_max_len)).
    """
    term_data, category_data = [], []
    term_max_len, category_max_len = 0, 0

    def normalize(aspect):
        # keep multi-word aspects as one token
        return aspect.replace(' ', replace_space_char) if replace_space else aspect

    for sent in data:
        text = sent[TEXT_KEY]
        if TERM_KEY in sent.keys():
            term_max_len = max(term_max_len, len(sent[TEXT_KEY]))
            for term, polarity in sent[TERM_KEY]:
                term_data.append([text, normalize(term), polarity])
        if CATEGORY_KEY in sent.keys():
            category_max_len = max(category_max_len, len(sent[TEXT_KEY]))
            for category, polarity in sent[CATEGORY_KEY]:
                category_data.append([text, normalize(category), polarity])

    if mess:
        term_data.extend(category_data)
        return term_data, max(term_max_len, category_max_len)
    return (term_data, term_max_len), (category_data, category_max_len)
def convert_data(data, max_len=None, with_label=True, extra_data=False):
    """
    Convert records of ``(text, aspect, polarity)`` into index sequences
    suitable for an embedding-layer lookup.

    Texts are tokenized and (pre-)padded to ``max_len``; aspects and labels
    each get their own tokenizer so every component has an independent
    word-index lookup.  Label indices are shifted to start from 0 and then
    one-hot encoded.

    :param data: iterable of records indexed by I_TEXT / I_ASPECT /
        I_POLARITY.
    :param max_len: pad/truncate length for the text sequences; no padding
        when None.
    :param with_label: False for inference-time data without polarity.
    :param extra_data: unused; kept for interface compatibility.
    :return: ``(converted_data, lookups)`` where ``converted_data`` holds
        numpy arrays for text, aspect and (optionally) one-hot labels, and
        ``lookups`` holds the matching word->index dicts in the same order.
    """
    texts = [record[I_TEXT] for record in data]
    aspects = [record[I_ASPECT] for record in data]
    labels = [record[I_POLARITY] for record in data] if with_label else []

    def _fit_indices(examples, pad_to=None, return_tokenizer=False,
                     token_filter='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'):
        """Fit a tokenizer on *examples*; return (sequences, word_index[, tokenizer])."""
        tokenizer = Tokenizer(filters=token_filter)
        tokenizer.fit_on_texts(examples)
        sequences = tokenizer.texts_to_sequences(examples)
        # Pre-padding (Keras default) keeps real tokens at the tail.
        if pad_to:
            sequences = pad_sequences(sequences, maxlen=pad_to)
        if return_tokenizer:
            return sequences, tokenizer.word_index, tokenizer
        return sequences, tokenizer.word_index

    converted_data, lookups = [], []

    text_seqs, text_lookup = _fit_indices(texts, max_len)
    converted_data.append(np.asarray(text_seqs, dtype='int32'))
    lookups.append(text_lookup)

    # Aspects keep '_' (the multi-word mask) and most punctuation, so use a
    # reduced filter to let masked aspect tokens survive tokenization.
    aspect_seqs, aspect_lookup = _fit_indices(
        aspects,
        token_filter='#$%&/:;<=>?@[\\]^`{|}~\t\n')
    converted_data.append(np.asarray(aspect_seqs, dtype='int32'))
    lookups.append(aspect_lookup)

    if with_label:
        label_seqs, label_lookup = _fit_indices(labels)
        # Tokenizer indices start at 1; shift to 0-based before one-hot so
        # no spurious class is created for the padding index.
        converted_data.append(
            to_categorical(np.asarray(label_seqs, dtype='int') - 1))
        lookups.append(label_lookup)

    return converted_data, lookups
def load_w2v(idxes, emb_file, save_to_file=None):
    """
    Build an embedding matrix for the training vocabulary from a pre-trained
    embedding file (e.g. GloVe).

    Rows are indexed by the Keras word indices (which start at 1, so row 0
    stays all-zero and can serve as the padding/mask index); words missing
    from the pre-trained file keep an all-zero vector.

    :param idxes: per-component word-index lookups; only ``idxes[I_TEXT]``
        is used.
    :param emb_file: path to a whitespace-separated "token coef..." file.
    :param save_to_file: optional ``.npy`` path to cache the result.
    :return: numpy array of shape ``(len(text_vocab) + 1, EMBEDDING_DIM)``.
    """
    idx = idxes[I_TEXT]
    # Row 0 is reserved for padding; unseen words stay zero.
    emb_matrix = np.zeros((len(idx) + 1, EMBEDDING_DIM))
    start_time = time()
    # Embedding dumps such as GloVe are UTF-8; be explicit so loading does
    # not depend on the platform default encoding.
    with open(emb_file, encoding='utf-8') as emb:
        for line in emb:
            pieces = line.strip().split()
            word, coef = pieces[0].strip(), pieces[1:]
            begin_idx = 0
            # Some tokens contain spaces, so the first float-parsable piece
            # marks where the coefficients actually begin; everything before
            # it is still part of the token.
            for elem_idx, elem in enumerate(coef):
                try:
                    float(elem)
                    # Found the start of the numeric coefficients.
                    begin_idx = elem_idx + 1
                    break
                except ValueError:
                    # NOTE(review): pieces are joined without the original
                    # space, so a multi-word token like "new york" becomes
                    # "newyork" -- confirm this matches how multi-word
                    # aspects are masked elsewhere (replace_space_char).
                    word += elem
            coef = np.asarray(pieces[begin_idx:], dtype=np.float32)
            if word in idx:
                # Copy the pre-trained vector into the row for this word.
                emb_matrix[idx[word]] = coef
    print('Loaded word embedding matrix within {}'.format(
        time() - start_time))
    # Save the loaded subset of the embedding matrix for faster reloads.
    if save_to_file:
        np.save(save_to_file, emb_matrix)
    return emb_matrix
def build_net(data, max_len, w2is, atae=True, extra_outputs=True,
              emb_mtrx_file=None, save_to_file=None):
    """
    Build the ATAE-LSTM from 'Attention-based LSTM for Aspect-level
    Sentiment Classification'.

    Word embeddings are loaded (frozen) from a pre-saved matrix matching the
    training vocabulary; aspect embeddings are uniform-randomly initialized
    and trained.  With ``atae=True`` an aspect-aware attention layer is
    applied over the LSTM outputs before the softmax classifier; otherwise a
    plain LSTM over the concatenated inputs is used (the 'AE' variant).

    :param data: (sentence indices, aspect indices, one-hot labels).
    :param max_len: padded sentence length (fixes the input shape for
        mini-batching).
    :param w2is: word-index lookups matching the components of *data*.
    :param atae: build the attention variant when True, plain AE-LSTM
        otherwise.
    :param extra_outputs: reserved for returning attention weights / aspect
        embeddings as extra model outputs; currently unused.
    :param emb_mtrx_file: ``.npy`` file with the pre-computed embedding
        matrix for the training vocabulary.
    :param save_to_file: optional path to save the freshly built model.
    :return: the Keras ``Model`` (uncompiled).
    """
    sents, aspects, labels = data
    sents_idx, aspects_idx, _ = w2is
    emb_mtrx = np.load(emb_mtrx_file)

    # Sentence input; embeddings are pre-trained and kept frozen.
    sents_tensor_input = Input(shape=(sents.shape[1],), dtype='int32')
    sents_tensor = Embedding(len(sents_idx) + 1,
                             emb_mtrx.shape[1],
                             weights=[emb_mtrx],
                             input_length=max_len,
                             trainable=False)(sents_tensor_input)

    # Aspect input: a single integer index per example.  Its (trainable)
    # embedding is concatenated to every time step of the sentence.
    aspects_tensor_input = Input(shape=(1,), dtype='int32')
    aspects_emb_initializer = initializers.RandomUniform(minval=-RANDOM_UNIFORM,
                                                         maxval=RANDOM_UNIFORM)
    aspects_emb_layer = Embedding(len(aspects_idx) + 1,
                                  ASPECT_EMBEDDING_DIM,
                                  embeddings_initializer=aspects_emb_initializer,
                                  trainable=True,
                                  name='asp_emb_layer')
    aspects_emb = aspects_emb_layer(aspects_tensor_input)
    # Squeeze the singleton time dimension before repeating across steps:
    # (1, ASPECT_EMBEDDING_DIM) -> (ASPECT_EMBEDDING_DIM,).
    aspects_tensor = Reshape((ASPECT_EMBEDDING_DIM,))(aspects_emb)
    # Repeat the aspect vector for every sentence time step.
    aspects_tensor = RepeatVector(max_len)(aspects_tensor)

    lstm_input = Concatenate()([sents_tensor, aspects_tensor])
    if atae:
        lstm_output = LSTM(HIDDEN_LAYER_SIZE, return_sequences=True)(lstm_input)
        # Attention scores over [h_t ; aspect] for every time step.
        attention_input = Concatenate()([lstm_output, aspects_tensor])
        attention_score = Dense(EMBEDDING_DIM + ASPECT_EMBEDDING_DIM,
                                use_bias=False,
                                name='attention_score_1')(attention_input)
        # Extra Dense projects each step's score to a scalar so the softmax
        # can be taken over the time axis.
        attention_weight = Dense(1, use_bias=False,
                                 name='attention_score_2')(attention_score)
        # Softmax over the time axis (axis=1), not the default last axis.
        # Fix: `name` must go to the Lambda constructor -- passing it to the
        # layer *call* raised a TypeError and left the layer unnamed.
        attention_weight = Lambda(lambda x: softmax(x, axis=1),
                                  name='attention_weights')(attention_weight)
        # Weighted sum of LSTM outputs -> attended sentence representation r.
        attention_represent = Dot(axes=1, name='r')([lstm_output,
                                                     attention_weight])
        # NOTE(review): the reshape assumes HIDDEN_LAYER_SIZE ==
        # EMBEDDING_DIM -- confirm the configured constants agree.
        attention_represent = Reshape((EMBEDDING_DIM,))(attention_represent)
        # Combine r with the last hidden state h_N, as in the paper.
        last_hidden = Lambda(lambda tensor: tensor[:, -1, :])(lstm_output)
        final_represent = Concatenate(name='final_concatenate')([
            attention_represent, last_hidden])
        final_represent = Dense(EMBEDDING_DIM, activation='tanh',
                                use_bias=False, name='final_representation')(
            final_represent)
        model_output = Dense(labels.shape[1],
                             activation='softmax',
                             activity_regularizer=regularizers.l2(
                                 L2_REGULARIZATION),
                             name='ATAE_LSTM_output')(final_represent)
    else:
        lstm_output = LSTM(HIDDEN_LAYER_SIZE,
                           return_sequences=False)(lstm_input)
        model_output = Dense(labels.shape[1],
                             activation='softmax',
                             name='Simple_AE_LSTM_ouptut')(lstm_output)

    model = Model(inputs=[sents_tensor_input,
                          aspects_tensor_input],
                  outputs=model_output)
    if save_to_file:
        model.save(save_to_file)
    return model
def train(data, model, model_optimizer=None, metrics=None, valid_ratio=0.1,
          epoch=10, mini_batch=25, save_to_file=None):
    """
    Compile and fit *model* on *data*.

    :param data: training data as (sentences, aspect indices, one-hot
        labels), indexed by I_TEXT / I_ASPECT / I_POLARITY.
    :param model: model built by `build_net`.
    :param model_optimizer: optimizer used to compile the model; defaults to
        Adagrad with a learning rate of 0.001.
    :param metrics: list of metric names to track; defaults to ['accuracy'].
    :param valid_ratio: fraction of the data held out for validation.
    :param epoch: number of training epochs.
    :param mini_batch: mini-batch size.
    :param save_to_file: optional path to save the trained model.
    :return: None
    """
    # Both a model and data are required; previously this guard used `and`,
    # which only fired when *both* were missing.
    if not model or not data:
        print('Please passed in data and model!')
        return
    if not metrics:
        metrics = ['accuracy']
    if not model_optimizer:
        model_optimizer = optimizers.Adagrad(lr=0.001)
    print("Training Model ...")
    print(model.summary())
    print('\t\twith hyper-parametes as')
    print('\t\t\tMini-Batch : {}'.format(mini_batch))
    print('\t\t\tEpoch : {}'.format(epoch))
    model.compile(model_optimizer, 'categorical_crossentropy', metrics=metrics)
    # Fix: fit on the *data* argument -- this previously referenced the
    # module-level `seqs_data`, silently ignoring the parameter.
    model.fit([data[I_TEXT], data[I_ASPECT]], data[I_POLARITY],
              mini_batch, epochs=epoch, validation_split=valid_ratio)
    if save_to_file:
        model.save(save_to_file)
def train_dev_split(data, ratio=0.8, seed=42):
    """
    Shuffle the dataset and split it into train and dev sets.

    The same permutation is applied to sentences, aspects and labels so the
    three components stay aligned.

    :param data: dataset indexed by I_TEXT / I_ASPECT / I_POLARITY, each a
        numpy array of equal length.
    :param ratio: fraction of samples assigned to the training set.
    :param seed: RNG seed for the shuffle.  Fix: previously ``42`` was
        hard-coded and this parameter was silently ignored.
    :return: (train_set, dev_set), each as [sentences, aspects, labels].
    """
    np.random.seed(seed)
    sents, aspects, labels = data[I_TEXT], data[I_ASPECT], data[I_POLARITY]
    idx = np.arange(sents.shape[0])
    np.random.shuffle(idx)
    # Apply one permutation to all three components to keep them aligned.
    sents = sents[idx]
    aspects = aspects[idx]
    labels = labels[idx]
    # Split boundary after shuffling.
    bnd = int(len(idx) * ratio)
    train_set = [sents[:bnd], aspects[:bnd], labels[:bnd]]
    dev_set = [sents[bnd:], aspects[bnd:], labels[bnd:]]
    return train_set, dev_set
def predict(data, lookup, max_len, model=None, save_to_file=None,
            extra_output=True):
    """
    Run the trained model on (sentence, aspect) pairs and map the predicted
    class indices back to polarity words.

    :param data: records of (text, aspect) without labels.
    :param lookup: index -> polarity-word dictionary used to decode the
        argmax predictions.
    :param max_len: padding length for the text sequences.
    :param model: pre-trained model instance; when None it is loaded from
        ``save_to_file``.
    :param save_to_file: path of a saved model, used only when *model* is
        None.
    :param extra_output: unused; kept for interface compatibility.
    :return: array of predicted polarity words.
    :raises ValueError: when neither a model nor a model file is supplied.
    """
    # The word-index lookups from conversion are not needed for decoding.
    converted, _ = convert_data(data, max_len, with_label=False)
    if not model:
        if not save_to_file:
            # TODO, should raise exception?
            raise ValueError('Please pass in model instance or '
                             'the path of file model saved to.')
        model = load_model(save_to_file,
                           custom_objects={'softmax': softmax})
    scores = model.predict([converted[I_TEXT], converted[I_ASPECT]])
    best = np.argmax(scores, axis=1)
    decode = np.vectorize(lambda class_idx: lookup.get(class_idx))
    # Keras class indices are 0-based while the label tokenizer starts at 1,
    # hence the +1 shift before the lookup.
    return decode(best + 1)
def get_layer(model, layer_name):
    """
    Fetch a layer from *model* by positional index or by name.

    :param model: model exposing ``layers`` and ``get_layer``.
    :param layer_name: int index into ``model.layers`` or str layer name.
    :return: the matching layer instance.
    :raises ValueError: when *layer_name* is neither int nor str.
    """
    if isinstance(layer_name, int):
        return model.layers[layer_name]
    if isinstance(layer_name, str):
        return model.get_layer(layer_name)
    raise ValueError('The layer name should only be `int` or `str`.')
def get_aspect_embeddings(model, layer_name, save_to_file=None):
    """
    Evaluate and return the aspect-embedding weights of a trained model.

    :param model: pre-trained model; when None it is reloaded from
        ``save_to_file``.  ValueError is raised when both are missing.
    :param layer_name: name or index of the embedding layer.
    :param save_to_file: path of a saved model, used only when *model* is
        None.
    :return: numpy tensor with the aspect embedding matrix.
    :raises ValueError: when neither a model nor a model file is supplied.
    """
    if not model:
        if not save_to_file:
            raise ValueError('No model found from parameter or file!')
        model = load_model(save_to_file)
    embedding_layer = get_layer(model, layer_name)
    # Evaluate the backend variable into a concrete numpy array.
    return K.eval(embedding_layer.embeddings)
def get_attention_weighs(data, att_layer_name, input_layers_names: list,
                         model=None, save_to_file=None):
    """
    Pull the intermediate attention weights out of a trained model.

    Builds a backend function from the model's input tensors to the output
    of the attention layer and evaluates it on *data*.

    :param data: converted data indexed by I_TEXT / I_ASPECT.
    :param att_layer_name: name or index of the attention layer.
    :param input_layers_names: names/indices of the input layers, in the
        order the model expects its inputs.
    :param model: pre-trained model; when None it is reloaded from
        ``save_to_file``.  ValueError is raised when both are missing.
    :param save_to_file: path of a saved model, used only when *model* is
        None.
    :return: numpy array of attention weights for the given data.
    :raises ValueError: when neither a model nor a model file is supplied.
    """
    if not model:
        if not save_to_file:
            raise ValueError('No model found from parameter or file!')
        model = load_model(save_to_file,
                           custom_objects={'softmax': softmax})
    attention_layer = get_layer(model, att_layer_name)
    # Collect input tensors in the same order as the model's inputs so that
    # K.function binds the fed arrays correctly.
    input_tensors = []
    for name in input_layers_names:
        layer = get_layer(model, name)
        if layer:
            input_tensors.append(layer.input)
    attention_fn = K.function(input_tensors, [attention_layer.output])
    return attention_fn([data[I_TEXT], data[I_ASPECT]])[0]
def plot_attention_weight(weights, focus_len):
    """
    Visualize attention weights over the last *focus_len* positions.

    Sequences are pre-padded, so only the trailing *focus_len* entries
    correspond to real tokens.

    :param weights: attention weights for one example.
    :param focus_len: number of trailing positions to display, usually the
        token length of the sentence.
    :return: None
    """
    import matplotlib.pyplot as plt
    # Pre-padding means the actual tokens occupy the tail of the sequence.
    attentions = weights.reshape((1, -1))[:, -focus_len:]
    print(attentions.shape)
    plt.imshow(attentions, cmap='plasma',
               aspect='auto', extent=[0, focus_len, 0, 1])
    plt.colorbar()
    plt.show()
if __name__ == '__main__':
    # Paths to the SemEval-2014 task-4 ABSA data and pre-trained embeddings.
    RAW_DATA_FILE_BASE = '/Users/jiazhen/datasets/SemEval' \
                         '/SemEval_2014_task4/ABSA_v2'
    RES_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE,
                                     'Restaurants_Train_v2.xml')
    LAP_RAW_DATA_FILE = os.path.join(RAW_DATA_FILE_BASE, 'Laptop_Train_v2.xml')
    WORD_EMB_BASE = '/Users/jiazhen/datasets'
    WORD_EMB_FILE = os.path.join(WORD_EMB_BASE, 'glove.840B.300d.txt')
    SAVED_EMB_FILE = os.path.join(RAW_DATA_FILE_BASE, 'glove_res_emb.npy')
    SAVED_MDL_FILE = os.path.join(RAW_DATA_FILE_BASE, 'atae_model.keras')

    res_data = extract_data(RES_RAW_DATA_FILE)
    check_absent(res_data)
    (term_data, term_max_len), _ = combine_data(res_data, mess=False)
    # Pad to the longest sentence so mini-batches have a fixed shape.
    seqs_data, w2is = convert_data(term_data, max_len=term_max_len)
    # emb_matrix = load_w2v(w2is, WORD_EMB_FILE, SAVED_EMB_FILE)

    # Train model (one-off; afterwards the saved model is reused below).
    # model = build_net(seqs_data, term_max_len, w2is,
    #                   atae=True, extra_outputs=True,
    #                   emb_mtrx_file=SAVED_EMB_FILE,
    #                   save_to_file=SAVED_MDL_FILE + '2')
    # train(seqs_data, model, epoch=3)

    # Invert the polarity lookup so class indices map back to words.
    label_lookup = {idx: polarity
                    for polarity, idx in w2is[I_POLARITY].items()}
    customized_data = [['The food is really delicious but '
                        'I hate the service', 'food'],
                       ['The food is really delicious but '
                        'I hate the service', 'serivce'],
                       ['I have to say there is no on could be faster than '
                        'him, but he need to take care of his bad motion as '
                        'a bar attendant, which will impact his serivce.',
                        'serivce']]
    pred = predict(customized_data, label_lookup, term_max_len,
                   save_to_file=SAVED_MDL_FILE + '2')
    print(pred)

    # Get attention weights for the customized sentences and visualize the
    # first one.
    converted_data, _ = convert_data(customized_data,
                                     term_max_len,
                                     with_label=False)
    weights = get_attention_weighs(converted_data,
                                   att_layer_name='attention_weight',
                                   # att_layer_name='attention_weights',
                                   input_layers_names=[2, 0],
                                   save_to_file=SAVED_MDL_FILE + '2')
    print(len(customized_data[0][I_TEXT].split()))
    # Fix: this previously referenced the undefined name `customized_ata`,
    # raising a NameError before plotting.
    focus_len = len(customized_data[0][I_TEXT].split())
    plot_attention_weight(weights[0], focus_len=focus_len)
    # TODO, Use gensim to visualize aspect word embeddings.
| 2.671875 | 3 |
operators/snapscheduler/python/pulumi_pulumi_kubernetes_crds_operators_snapscheduler/snapscheduler/v1/outputs.py | pulumi/pulumi-kubernetes-crds | 0 | 12758508 | <gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Public surface of this generated module: one output type per object in the
# snapscheduler CRD schema (spec, selectors, retention/template sub-objects
# and status).
__all__ = [
    'SnapshotScheduleSpec',
    'SnapshotScheduleSpecClaimSelector',
    'SnapshotScheduleSpecClaimSelectorMatchExpressions',
    'SnapshotScheduleSpecRetention',
    'SnapshotScheduleSpecSnapshotTemplate',
    'SnapshotScheduleStatus',
    'SnapshotScheduleStatusConditions',
]
@pulumi.output_type
class SnapshotScheduleSpec(dict):
    """
    SnapshotScheduleSpec defines the desired state of SnapshotSchedule
    """
    def __init__(__self__, *,
                 claim_selector: Optional['outputs.SnapshotScheduleSpecClaimSelector'] = None,
                 disabled: Optional[bool] = None,
                 retention: Optional['outputs.SnapshotScheduleSpecRetention'] = None,
                 schedule: Optional[str] = None,
                 snapshot_template: Optional['outputs.SnapshotScheduleSpecSnapshotTemplate'] = None):
        """
        SnapshotScheduleSpec defines the desired state of SnapshotSchedule
        :param 'SnapshotScheduleSpecClaimSelectorArgs' claim_selector: ClaimSelector selects which PVCs will be snapshotted according to this schedule.
        :param bool disabled: Disabled determines whether this schedule is currently disabled.
        :param 'SnapshotScheduleSpecRetentionArgs' retention: Retention determines how long this schedule's snapshots will be kept.
        :param str schedule: Schedule is a Cronspec specifying when snapshots should be taken. See https://en.wikipedia.org/wiki/Cron for a description of the format.
        :param 'SnapshotScheduleSpecSnapshotTemplateArgs' snapshot_template: SnapshotTemplate is a template description of the Snapshots to be created.
        """
        # Only set properties that were explicitly provided, so unset
        # optional fields stay absent from the output dict.
        if claim_selector is not None:
            pulumi.set(__self__, "claim_selector", claim_selector)
        if disabled is not None:
            pulumi.set(__self__, "disabled", disabled)
        if retention is not None:
            pulumi.set(__self__, "retention", retention)
        if schedule is not None:
            pulumi.set(__self__, "schedule", schedule)
        if snapshot_template is not None:
            pulumi.set(__self__, "snapshot_template", snapshot_template)

    @property
    @pulumi.getter(name="claimSelector")
    def claim_selector(self) -> Optional['outputs.SnapshotScheduleSpecClaimSelector']:
        """
        ClaimSelector selects which PVCs will be snapshotted according to this schedule.
        """
        return pulumi.get(self, "claim_selector")

    @property
    @pulumi.getter
    def disabled(self) -> Optional[bool]:
        """
        Disabled determines whether this schedule is currently disabled.
        """
        return pulumi.get(self, "disabled")

    @property
    @pulumi.getter
    def retention(self) -> Optional['outputs.SnapshotScheduleSpecRetention']:
        """
        Retention determines how long this schedule's snapshots will be kept.
        """
        return pulumi.get(self, "retention")

    @property
    @pulumi.getter
    def schedule(self) -> Optional[str]:
        """
        Schedule is a Cronspec specifying when snapshots should be taken. See https://en.wikipedia.org/wiki/Cron for a description of the format.
        """
        return pulumi.get(self, "schedule")

    @property
    @pulumi.getter(name="snapshotTemplate")
    def snapshot_template(self) -> Optional['outputs.SnapshotScheduleSpecSnapshotTemplate']:
        """
        SnapshotTemplate is a template description of the Snapshots to be created.
        """
        return pulumi.get(self, "snapshot_template")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleSpecClaimSelector(dict):
    """
    ClaimSelector selects which PVCs will be snapshotted according to this schedule.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.SnapshotScheduleSpecClaimSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, str]] = None):
        """
        ClaimSelector selects which PVCs will be snapshotted according to this schedule.
        :param Sequence['SnapshotScheduleSpecClaimSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        # Only set properties that were explicitly provided.
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)

    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.SnapshotScheduleSpecClaimSelectorMatchExpressions']]:
        """
        matchExpressions is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")

    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, str]]:
        """
        matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleSpecClaimSelectorMatchExpressions(dict):
    """
    A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: key is the label key that the selector applies to.
        :param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        # key and operator are required by the CRD schema; values is optional.
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        key is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleSpecRetention(dict):
    """
    Retention determines how long this schedule's snapshots will be kept.
    """
    def __init__(__self__, *,
                 expires: Optional[str] = None,
                 max_count: Optional[int] = None):
        """
        Retention determines how long this schedule's snapshots will be kept.
        :param str expires: Expires is the length of time (time.Duration) after which a given Snapshot will be deleted.
        :param int max_count: Presumably the maximum number of snapshots to retain per schedule -- not documented in the upstream CRD schema; confirm against the snapscheduler operator docs.
        """
        # Only set properties that were explicitly provided.
        if expires is not None:
            pulumi.set(__self__, "expires", expires)
        if max_count is not None:
            pulumi.set(__self__, "max_count", max_count)

    @property
    @pulumi.getter
    def expires(self) -> Optional[str]:
        """
        Expires is the length of time (time.Duration) after which a given Snapshot will be deleted.
        """
        return pulumi.get(self, "expires")

    @property
    @pulumi.getter(name="maxCount")
    def max_count(self) -> Optional[int]:
        # No upstream description for this field; see __init__ docstring.
        return pulumi.get(self, "max_count")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleSpecSnapshotTemplate(dict):
    """
    SnapshotTemplate is a template description of the Snapshots to be created.
    """
    def __init__(__self__, *,
                 labels: Optional[Mapping[str, str]] = None,
                 snapshot_class_name: Optional[str] = None):
        """
        SnapshotTemplate is a template description of the Snapshots to be created.
        :param Mapping[str, str] labels: Labels is a list of labels that should be added to each Snapshot created by this schedule.
        :param str snapshot_class_name: SnapshotClassName is the name of the VSC to be used when creating Snapshots.
        """
        # Only set properties that were explicitly provided.
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if snapshot_class_name is not None:
            pulumi.set(__self__, "snapshot_class_name", snapshot_class_name)

    @property
    @pulumi.getter
    def labels(self) -> Optional[Mapping[str, str]]:
        """
        Labels is a list of labels that should be added to each Snapshot created by this schedule.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter(name="snapshotClassName")
    def snapshot_class_name(self) -> Optional[str]:
        """
        SnapshotClassName is the name of the VSC to be used when creating Snapshots.
        """
        return pulumi.get(self, "snapshot_class_name")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleStatus(dict):
    """
    SnapshotScheduleStatus defines the observed state of SnapshotSchedule
    """
    def __init__(__self__, *,
                 conditions: Optional[Sequence['outputs.SnapshotScheduleStatusConditions']] = None,
                 last_snapshot_time: Optional[str] = None,
                 next_snapshot_time: Optional[str] = None):
        """
        SnapshotScheduleStatus defines the observed state of SnapshotSchedule
        :param Sequence['SnapshotScheduleStatusConditionsArgs'] conditions: Conditions is a list of conditions related to operator reconciliation.
        :param str last_snapshot_time: LastSnapshotTime is the time of the most recent set of snapshots generated by this schedule.
        :param str next_snapshot_time: NextSnapshotTime is the time when this schedule should create the next set of snapshots.
        """
        # Only set properties that were explicitly provided.
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if last_snapshot_time is not None:
            pulumi.set(__self__, "last_snapshot_time", last_snapshot_time)
        if next_snapshot_time is not None:
            pulumi.set(__self__, "next_snapshot_time", next_snapshot_time)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[Sequence['outputs.SnapshotScheduleStatusConditions']]:
        """
        Conditions is a list of conditions related to operator reconciliation.
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter(name="lastSnapshotTime")
    def last_snapshot_time(self) -> Optional[str]:
        """
        LastSnapshotTime is the time of the most recent set of snapshots generated by this schedule.
        """
        return pulumi.get(self, "last_snapshot_time")

    @property
    @pulumi.getter(name="nextSnapshotTime")
    def next_snapshot_time(self) -> Optional[str]:
        """
        NextSnapshotTime is the time when this schedule should create the next set of snapshots.
        """
        return pulumi.get(self, "next_snapshot_time")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SnapshotScheduleStatusConditions(dict):
    """
    Condition represents the state of the operator's reconciliation functionality.
    """
    def __init__(__self__, *,
                 status: str,
                 type: str,
                 last_heartbeat_time: Optional[str] = None,
                 last_transition_time: Optional[str] = None,
                 message: Optional[str] = None,
                 reason: Optional[str] = None):
        """
        Condition represents the state of the operator's reconciliation functionality.
        :param str status: Status of the condition -- presumably "True"/"False"/"Unknown" per the Kubernetes condition convention; not documented in the upstream schema, confirm before relying on it.
        :param str type: ConditionType is the state of the operator's reconciliation functionality.
        :param str last_heartbeat_time: Presumably the last time the condition was re-asserted (Kubernetes convention); undocumented upstream.
        :param str last_transition_time: Presumably the last time the condition's status changed (Kubernetes convention); undocumented upstream.
        :param str message: Presumably a human-readable detail message for the condition; undocumented upstream.
        :param str reason: Presumably a machine-readable reason for the condition's last transition; undocumented upstream.
        """
        # status and type are required by the CRD schema; the rest optional.
        pulumi.set(__self__, "status", status)
        pulumi.set(__self__, "type", type)
        if last_heartbeat_time is not None:
            pulumi.set(__self__, "last_heartbeat_time", last_heartbeat_time)
        if last_transition_time is not None:
            pulumi.set(__self__, "last_transition_time", last_transition_time)
        if message is not None:
            pulumi.set(__self__, "message", message)
        if reason is not None:
            pulumi.set(__self__, "reason", reason)

    @property
    @pulumi.getter
    def status(self) -> str:
        # See __init__ docstring; no upstream description for this field.
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        ConditionType is the state of the operator's reconciliation functionality.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="lastHeartbeatTime")
    def last_heartbeat_time(self) -> Optional[str]:
        # See __init__ docstring; no upstream description for this field.
        return pulumi.get(self, "last_heartbeat_time")

    @property
    @pulumi.getter(name="lastTransitionTime")
    def last_transition_time(self) -> Optional[str]:
        # See __init__ docstring; no upstream description for this field.
        return pulumi.get(self, "last_transition_time")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        # See __init__ docstring; no upstream description for this field.
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        # See __init__ docstring; no upstream description for this field.
        return pulumi.get(self, "reason")

    def _translate_property(self, prop):
        # Map camelCase wire property names back to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 1.695313 | 2 |
utils/helper.py | parksunwoo/daanet | 145 | 12758509 | <gh_stars>100-1000
import importlib
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from random import shuffle
import GPUtil
import tensorflow as tf
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from tensorflow.contrib.training import HParams
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from gpu_env import APP_NAME, DEVICE_ID, IGNORE_PATTERNS
millnames = ['', ' K', ' M', ' BL', ' TL']
regex_title_source = re.compile(r'^([^_\-—]*).*?[_\-—]\s?([^_\-—]+)[\s_\-—]?$')
def set_logger(model_id=None):
    """Configure and return the application-wide logger.

    When ``model_id`` is given it is embedded in every log line so output
    from concurrent model runs can be told apart.
    """
    logger = logging.getLogger(APP_NAME)
    logger.setLevel(logging.INFO)
    if model_id:
        formatter = logging.Formatter(
            '%(levelname)-.1s:' + model_id + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
            '%m-%d %H:%M:%S')
    else:
        formatter = logging.Formatter(
            '%(levelname)-.1s:[%(filename)s:%(lineno)d]:%(message)s', datefmt=
            '%m-%d %H:%M:%S')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    # Replace (not append to) existing handlers so repeated calls do not
    # produce duplicated log lines.
    logger.handlers = []
    logger.addHandler(console_handler)
    return logger
def touch(fname: str, times=None, create_dirs: bool = False):
    """Unix-style ``touch``: create ``fname`` if missing and update its times.

    :param fname: path of the file to touch.
    :param times: optional ``(atime, mtime)`` tuple for ``os.utime``;
        ``None`` means "now".
    :param create_dirs: when True, create missing parent directories first.
    """
    if create_dirs:
        base_dir = os.path.dirname(fname)
        # Guard against a bare filename: dirname('') would make makedirs
        # raise.  exist_ok avoids the check-then-create race of the
        # original os.path.exists() test.  (Also dropped the redundant
        # function-local `import os`; os is imported at module level.)
        if base_dir:
            os.makedirs(base_dir, exist_ok=True)
    with open(fname, 'a'):
        os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
    """Create directory ``base_dir`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original exists()-then-makedirs
    sequence, which was racy and needed a redundant local ``import os``.
    """
    os.makedirs(base_dir, exist_ok=True)
def millify(n):
    """Format *n* compactly with a magnitude suffix, e.g. 1500 -> '2 K'."""
    value = float(n)
    if value == 0:
        magnitude = 0
    else:
        magnitude = int(math.floor(math.log10(abs(value)) / 3))
    # Clamp to the available suffixes (same result as the original
    # max(0, min(...)) nesting, just spelled the other way around).
    idx = min(max(magnitude, 0), len(millnames) - 1)
    return '{:.0f}{}'.format(value / 10 ** (3 * idx), millnames[idx])
def args2hparam(args, vocab):
    """Convert an argparse namespace plus a vocabulary into a TF HParams object."""
    params = vars(args)
    # Note: vars() returns the namespace's own dict, so this also mutates args.
    params['vocab'] = vocab
    p = HParams()
    for k, v in params.items():
        p.add_hparam(k, v)
    return p
def runner(main, *done):
    """Run ``main`` and always invoke the ``done`` cleanup callbacks.

    TF's OutOfRangeError / IndexError are treated as normal data exhaustion
    rather than failures.
    """
    logger = logging.getLogger(APP_NAME)
    try:
        main()
    except (tf.errors.OutOfRangeError, IndexError) as e:
        logger.warning('Data has been exhausted! Done!')
    finally:
        [f() for f in done]
def parse_yaml(yaml_path, model_id):
    """Load a YAML config into a fresh HParams, tagged with ``model_id``.

    Keys already present on the HParams are overwritten; new keys are added.
    """
    from tensorflow.contrib.training import HParams
    from ruamel.yaml import YAML
    hparams = HParams()
    hparams.add_hparam('model_id', model_id)
    with open(yaml_path) as fp:
        customized = YAML().load(fp)
    for k, v in customized.items():
        if k in hparams:
            hparams.set_hparam(k, v)
        else:
            hparams.add_hparam(k, v)
    return hparams
def parse_args(yaml_path, model_id, default_set, followup=None):
    """Build the full HParams configuration for a run.

    Layers, in increasing precedence: the ``default_set`` section of
    ``default.yaml``, then the user's ``yaml_path`` overrides, then the
    ``followup`` dict (used e.g. at prediction time).  Also derives the
    save/code/summary directories, snapshots the code base into the save
    directory, and logs the final parameter table.
    """
    logger = logging.getLogger(APP_NAME)
    hparams = HParams()
    hparams.add_hparam('model_id', model_id)
    with open('default.yaml') as fp:
        configs = YAML().load(fp)
    default_cfg = configs[default_set]
    add_param_recur(hparams, default_cfg)
    if yaml_path:
        logger.info('loading parameters...')
        with open(yaml_path) as fp:
            customized = YAML().load(fp)
        for k, v in customized.items():
            if k in hparams and hparams.get(k) != v:
                logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
                hparams.set_hparam(k, v)
            elif k not in hparams:  # add new parameter
                hparams.add_hparam(k, v)
                logger.info('%30s %20s: %20s' % ("[add from %s]" % yaml_path, k, hparams.get(k)))
    if followup:
        # useful when changing args for prediction
        logger.info('override args with follow-up args...')
        for k, v in followup.items():
            if k in hparams and hparams.get(k) != v:
                logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
                hparams.set_hparam(k, v)
            elif k not in hparams:
                logger.warning('%s is not a valid attribute! ignore!' % k)
    if 'save_dir' not in hparams:
        hparams.add_hparam('save_dir', os.path.join(hparams.get('model_dir'), hparams.get('model_id')))
    if 'code_dir' not in hparams:
        hparams.add_hparam('code_dir', os.path.join(hparams.get('save_dir'), 'code'))
    hparams.set_hparam('summary_dir', os.path.join(hparams.get('save_dir'), 'summary'))
    # reset logger model id
    logger = set_logger(model_id='%s:%s' % (DEVICE_ID, hparams.get('model_id')))
    try:
        # Snapshot the code base alongside the checkpoints for reproducibility.
        shutil.copytree('./', hparams.get('code_dir'), ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
        logger.info('current code base is copied to %s' % hparams.get('save_dir'))
    except FileExistsError:
        logger.info('code base exist, no need to copy!')
    # if hparams.get('model_id') != model_id:
    #     logger.warning('model id is changed %s -> %s! '
    #                    'This happens when you train a pretrained model' % (
    #         hparams.get('model_id'), model_id))
    #     hparams.set_hparam('model_id', model_id)
    if 'loss_csv_file' not in hparams:
        hparams.add_hparam('loss_csv_file', os.path.join(hparams.get('save_dir'), 'loss.csv'))
    if 'is_serving' not in hparams:
        hparams.add_hparam('is_serving', False)
    logger.info('current parameters')
    for k, v in sorted(vars(hparams).items()):
        if not k.startswith('_'):
            logger.info('%20s = %-20s' % (k, v))
    return hparams
def add_param_recur(root, p_tree):
    """Recursively copy a (possibly nested) ruamel YAML mapping into ``root``,
    turning nested mappings into nested HParams nodes."""
    for k, v in p_tree.items():
        if isinstance(v, CommentedMap):
            new_node = HParams()
            add_param_recur(new_node, v)
            root.add_hparam(k, new_node)
        else:
            root.add_hparam(k, v)
def fill_gpu_jobs(all_jobs, logger, job_parser,
                  wait_until_next=300, retry_delay=300, do_shuffle=False):
    """Drain a queue of job descriptions, launching each as a shell subprocess
    when a GPU is free (falling back to CPU if GPUtil finds no GPU at all).

    :param all_jobs: list of job descriptions; consumed destructively (pop).
    :param job_parser: callable turning a job description into a shell command.
    :param wait_until_next: seconds to sleep after a successful launch.
    :param retry_delay: seconds to sleep before retrying when all GPUs are busy.
    :param do_shuffle: randomize launch order first.
    :return: the job descriptions whose processes exited non-zero.
    """
    if do_shuffle:
        shuffle(all_jobs)
    all_procs = []
    while all_jobs:
        logger.info('number of jobs in the queue: %d' % len(all_jobs))
        j = all_jobs.pop()
        logger.info('will start the job: %s ...' % job_parser(j))
        try:
            GPUtil.getFirstAvailable()
            # check if there is a free GPU!
            process = subprocess.Popen(job_parser(j), shell=True)
            all_procs.append((process, j))
            time.sleep(wait_until_next)
        except FileNotFoundError:
            # GPUtil raises this when no GPU/driver is present at all.
            logger.warning('there is no gpu, running on cpu!')
            process = subprocess.Popen(job_parser(j), shell=True)
            all_procs.append((process, j))
        except RuntimeError as e:
            logger.error(str(e))
            logger.warning('all gpus are busy! waiting for a free slot...')
            # add job back
            all_jobs.append(j)
            time.sleep(retry_delay)
    # Block until every launched process finishes, then report failures.
    exit_codes = [(p.wait(), j) for p, j in all_procs]
    return [v for p, v in exit_codes if p != 0]
def get_args_cli(args):
    """Parse ``--key=value`` CLI tokens into a dict, grouping repeated keys.

    Values are typed via parse_arg(); a key given once maps to a scalar,
    a key given multiple times maps to a list.
    """
    parsed = defaultdict(list)
    if args:
        for token in args:
            raw_key, raw_value = token.split('=')
            parsed[raw_key.lstrip('-')].append(raw_value)
    for key, raw_values in parsed.items():
        typed = [v for v in (parse_arg(rv) for rv in raw_values) if v is not None]
        if len(typed) == 1:
            parsed[key] = typed[0]
        elif len(typed) > 1:
            parsed[key] = typed
    return parsed
def parse_arg(v: str):
    """Parse a single CLI string value into a typed Python object.

    Handles, in order: bracketed lists (elements parsed recursively),
    ints, floats, empty string -> None, 'true'/'false' -> bool; anything
    else is returned unchanged as a string.
    """
    if v.startswith('[') and v.endswith(']'):
        # Strip the outer brackets and split on commas.  An empty list
        # literal ('[]' or '[ ]') must yield [] — the original's
        # `len(tmp) > 0` test was always true because str.split returns
        # at least one element, so '[]' wrongly produced [None].
        inner = v[1:-1].strip()
        if not inner:
            return []
        return [parse_arg(item.strip()) for item in inner.split(',')]
    try:
        return int(v)  # try int first so '3' does not become 3.0
    except ValueError:
        pass
    try:
        return float(v)
    except ValueError:
        pass
    if len(v) == 0:
        # empty parameter values are ignored upstream
        return None
    if v.lower() == 'true':  # parse boolean parameter
        return True
    if v.lower() == 'false':
        return False
    return v
def get_scope_name():
    """Return the top-level component of the current TF variable scope name."""
    return tf.get_variable_scope().name.split('/')[0]
def sparse_nll_loss(probs, labels, epsilon=1e-9, scope=None):
    """
    negative log likelihood loss

    ``labels`` are integer class ids; they are one-hot encoded over the
    class axis and the cross-entropy with ``probs`` is returned per row.
    ``epsilon`` guards the log against zero probabilities.
    """
    with tf.name_scope(scope, "log_loss"):
        labels = tf.one_hot(labels, tf.shape(probs)[1], axis=1, dtype=tf.float32)
        losses = - tf.reduce_sum(labels * tf.log(probs + epsilon), 1)
        return losses
def normalize_distribution(p, eps=1e-9):
    """Add ``eps`` for numerical stability, then renormalize each row of ``p``
    to sum to one."""
    p += eps
    norm = tf.reduce_sum(p, axis=1)
    return tf.cast(p, tf.float32) / tf.reshape(norm, (-1, 1))
def kl_divergence(p, q, eps=1e-9):
    """Row-wise KL(p || q); both inputs are renormalized first."""
    p = normalize_distribution(p, eps)
    q = normalize_distribution(q, eps)
    return tf.reduce_sum(p * tf.log(p / q), axis=1)
def get_kl_loss(start_label, start_probs, bandwidth=1.0):
    """Symmetric KL between predicted span-start probabilities and a soft
    Gaussian-shaped target centered on the true start position
    (``bandwidth`` controls how sharply the target peaks)."""
    a = tf.reshape(tf.range(tf.shape(start_probs)[1]), (1, -1))
    b = tf.reshape(start_label, (-1, 1))
    start_true_probs = tf.exp(-tf.cast(tf.squared_difference(a, b), tf.float32) / bandwidth)
    return sym_kl_divergence(start_true_probs, start_probs)
def sym_kl_divergence(p, q, eps=1e-9):
    """Symmetrized KL divergence: (KL(p||q) + KL(q||p)) / 2."""
    return (kl_divergence(p, q, eps) + kl_divergence(q, p, eps)) / 2.0
def get_conv1d(x, out_dim, window_len, name, act_fn):
    """1-D convolution with stride 1 and SAME padding."""
    return tf.layers.conv1d(x, out_dim, window_len, strides=1, padding='SAME', name=name, activation=act_fn)
def upsampling_a2b(a, b, D_a):
    """Resize ``a`` (feature dim ``D_a``) along axis 1 to match ``b``'s length
    using nearest-neighbour interpolation via the image-resize op."""
    return tf.squeeze(tf.image.resize_images(tf.expand_dims(a, axis=-1), [tf.shape(b)[1], D_a],
                                             method=ResizeMethod.NEAREST_NEIGHBOR), axis=-1)
def dropout(args, keep_prob, is_train, mode="recurrent"):
    """Apply dropout to ``args`` only when ``is_train`` is true.

    ``mode`` controls the noise shape: 'embedding' drops whole rows (and
    rescales by keep_prob), 'recurrent' on 3-D inputs shares one mask
    across the middle (presumably time) dimension; anything else falls back
    to element-wise dropout.
    """
    if keep_prob < 1.0:
        noise_shape = None
        scale = 1.0
        shape = tf.shape(args)
        if mode == "embedding":
            noise_shape = [shape[0], 1]
            scale = keep_prob
        if mode == "recurrent" and len(args.get_shape().as_list()) == 3:
            noise_shape = [shape[0], 1, shape[-1]]
        # Only drop at training time; at inference pass through unchanged.
        args = tf.cond(is_train, lambda: tf.nn.dropout(
            args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)
    return args
def get_tmp_yaml(par, prefix=None):
    """Dump ``par`` to a new temporary YAML file and return its path.

    delete=False is intentional: the caller reads the file by name later
    and is responsible for removing it.
    """
    import tempfile
    with tempfile.NamedTemporaryFile('w', delete=False, prefix=prefix) as tmp:
        YAML().dump(par, tmp)
        return tmp.name
def build_model(args, reset_graph=True):
    """Instantiate the RCCore model class from the package named by
    ``args.package_rccore``, optionally resetting the TF default graph first."""
    rccore = importlib.import_module(args.package_rccore)
    if reset_graph:
        tf.reset_default_graph()
    return rccore.RCCore(args)
def get_last_output(output, sequence_length, name):
    """Get the last value of the returned output of an RNN.
    http://disq.us/p/1gjkgdr
    output: [batch x number of steps x ... ] Output of the dynamic lstm.
    sequence_length: [batch] Length of each of the sequence.
    """
    # Build (row, last_valid_step) index pairs and gather them in one op.
    rng = tf.range(0, tf.shape(sequence_length)[0])
    indexes = tf.stack([rng, sequence_length - 1], 1)
    return tf.gather_nd(output, indexes, name)
def import_class(import_str):
    """Import and return an attribute (usually a class) from a dotted path.

    The directory of this file is temporarily prepended to ``sys.path`` so
    sibling modules resolve; it is now always removed again, even when the
    import raises (the original leaked the sys.path entry in that case).

    :raises ImportError: when the module imports but lacks the attribute.
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, cur_dir)
    try:
        __import__(mod_str)
    finally:
        # Always undo the sys.path mutation.
        sys.path.remove(cur_dir)
    try:
        return getattr(sys.modules[mod_str], class_str)
    except AttributeError:
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_str,
                           traceback.format_exception(*sys.exc_info())))
def delete_module(modname):
    """Drop every sys.modules entry whose key or repr mentions ``modname``,
    forcing a fresh import on next use."""
    from sys import modules
    # Collect first, then delete: mutating while iterating is unsafe.
    doomed = [
        key for key, value in modules.items()
        if modname in key or modname in str(value)
    ]
    for key in doomed:
        del modules[key]
| 1.84375 | 2 |
tests/test_dte_constants.py | fyntex/lib-cl-sii-python | 8 | 12758510 | <gh_stars>1-10
import unittest
from cl_sii.dte import constants # noqa: F401
from cl_sii.dte.constants import TipoDteEnum
class TipoDteEnumTest(unittest.TestCase):
    """Checks TipoDteEnum membership plus each member's name, numeric code,
    and derived boolean properties."""

    def _assert_flags(self, value, *, is_factura, is_factura_venta,
                      is_factura_compra, is_nota, emisor_is_vendedor,
                      receptor_is_vendedor):
        """Assert all derived boolean properties of *value*.

        Uses assertIs throughout: the original mixed assertEqual with
        assertTrue(x is y), and the latter only reports 'False is not true'
        without naming the failing property.
        """
        self.assertIs(value.is_factura, is_factura)
        self.assertIs(value.is_factura_venta, is_factura_venta)
        self.assertIs(value.is_factura_compra, is_factura_compra)
        self.assertIs(value.is_nota, is_nota)
        self.assertIs(value.emisor_is_vendedor, emisor_is_vendedor)
        self.assertIs(value.receptor_is_vendedor, receptor_is_vendedor)

    def test_members(self):
        self.assertSetEqual(
            {x for x in TipoDteEnum},
            {
                TipoDteEnum.FACTURA_ELECTRONICA,
                TipoDteEnum.FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA,
                TipoDteEnum.LIQUIDACION_FACTURA_ELECTRONICA,
                TipoDteEnum.FACTURA_COMPRA_ELECTRONICA,
                TipoDteEnum.GUIA_DESPACHO_ELECTRONICA,
                TipoDteEnum.NOTA_DEBITO_ELECTRONICA,
                TipoDteEnum.NOTA_CREDITO_ELECTRONICA,
            }
        )

    def test_FACTURA_ELECTRONICA(self):
        value = TipoDteEnum.FACTURA_ELECTRONICA
        self.assertEqual(value.name, 'FACTURA_ELECTRONICA')
        self.assertEqual(value.value, 33)
        self._assert_flags(value, is_factura=True, is_factura_venta=True,
                           is_factura_compra=False, is_nota=False,
                           emisor_is_vendedor=True, receptor_is_vendedor=False)

    def test_FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA(self):
        value = TipoDteEnum.FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA
        self.assertEqual(value.name, 'FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA')
        self.assertEqual(value.value, 34)
        self._assert_flags(value, is_factura=True, is_factura_venta=True,
                           is_factura_compra=False, is_nota=False,
                           emisor_is_vendedor=True, receptor_is_vendedor=False)

    def test_LIQUIDACION_FACTURA_ELECTRONICA(self):
        value = TipoDteEnum.LIQUIDACION_FACTURA_ELECTRONICA
        self.assertEqual(value.name, 'LIQUIDACION_FACTURA_ELECTRONICA')
        self.assertEqual(value.value, 43)
        self._assert_flags(value, is_factura=True, is_factura_venta=True,
                           is_factura_compra=False, is_nota=False,
                           emisor_is_vendedor=True, receptor_is_vendedor=False)

    def test_FACTURA_COMPRA_ELECTRONICA(self):
        value = TipoDteEnum.FACTURA_COMPRA_ELECTRONICA
        self.assertEqual(value.name, 'FACTURA_COMPRA_ELECTRONICA')
        self.assertEqual(value.value, 46)
        self._assert_flags(value, is_factura=True, is_factura_venta=False,
                           is_factura_compra=True, is_nota=False,
                           emisor_is_vendedor=False, receptor_is_vendedor=True)

    def test_GUIA_DESPACHO_ELECTRONICA(self):
        value = TipoDteEnum.GUIA_DESPACHO_ELECTRONICA
        self.assertEqual(value.name, 'GUIA_DESPACHO_ELECTRONICA')
        self.assertEqual(value.value, 52)
        self._assert_flags(value, is_factura=False, is_factura_venta=False,
                           is_factura_compra=False, is_nota=False,
                           emisor_is_vendedor=False, receptor_is_vendedor=False)

    def test_NOTA_DEBITO_ELECTRONICA(self):
        value = TipoDteEnum.NOTA_DEBITO_ELECTRONICA
        self.assertEqual(value.name, 'NOTA_DEBITO_ELECTRONICA')
        self.assertEqual(value.value, 56)
        self._assert_flags(value, is_factura=False, is_factura_venta=False,
                           is_factura_compra=False, is_nota=True,
                           emisor_is_vendedor=False, receptor_is_vendedor=False)

    def test_NOTA_CREDITO_ELECTRONICA(self):
        value = TipoDteEnum.NOTA_CREDITO_ELECTRONICA
        self.assertEqual(value.name, 'NOTA_CREDITO_ELECTRONICA')
        self.assertEqual(value.value, 61)
        self._assert_flags(value, is_factura=False, is_factura_venta=False,
                           is_factura_compra=False, is_nota=True,
                           emisor_is_vendedor=False, receptor_is_vendedor=False)
| 2.4375 | 2 |
bin/csv_latency_parser_bqr.py | icsa-caps/Odyssey | 7 | 12758511 | #!/usr/bin/python
import sys, os, ntpath, getopt
"""
========
Parser for aggregated over time results
========
"""
class LatencyParser:
    """Parse a latency-histogram report from stdin and print avg/percentile
    statistics for reads and for all requests combined.  (Python 2 code:
    note the print statements and xrange.)

    Input lines look like ``reads: <latency_us>, <count>`` plus ``writes``,
    ``reads-hl`` and ``writes-hl`` (highest-latency) records; lines starting
    with '#' are comments.
    """
    def __init__(self):
        # Parallel arrays indexed by histogram bucket: latency_values[i] is
        # the bucket's latency in us, reads/writes/all_reqs[i] its counts.
        self.latency_values = []
        self.reads = []
        self.max_read_latency = 0
        self.max_write_latency = 0
        self.writes = []
        self.all_reqs = []
        # Construction immediately parses stdin and prints everything.
        self.parseInputStats()
        self.printAllStats()
        # self.printStats(all_reqs)
    def printStats(self, array, max_latency):
        """Print average, selected percentiles and the max for one histogram."""
        self.avgLatency(array)
        #self.percentileLatency(array, 20)
        self.percentileLatency(array, 50)
        self.percentileLatency(array, 90)
        self.percentileLatency(array, 95)
        self.percentileLatency(array, 99)
        #self.percentileLatency(array, 99.9)
        #self.percentileLatency(array, 99.99)
        #self.percentileLatency(array, 99.999)
        #self.percentileLatency(array, 99.9999)
        #self.percentileLatency(array, 100)
        print "Max Latency: ", max_latency, "us"
    def printAllStats(self):
        """Print read stats and overall stats (write stats are disabled)."""
        #print "~~~~~~ Write Stats ~~~~~~~"
        #self.printStats(self.writes, self.max_write_latency)
        print "\n~~~~~~ Read Stats ~~~~~~~~"
        self.printStats(self.reads, self.max_read_latency)
        print "\n~~~~~~ Overall Stats ~~~~~~~~~"
        self.printStats(self.all_reqs, max(self.max_read_latency, self.max_write_latency))
    def avgLatency(self, array):
        """Print the count-weighted average latency of ``array``.

        NOTE(review): this is Python 2, so cummulative / total_reqs is
        integer division — the average is truncated to whole microseconds.
        """
        cummulative = 0
        total_reqs = 0
        for x in xrange(len(self.latency_values)):
            cummulative = self.latency_values[x] * array[x] + cummulative
            total_reqs += array[x]
        if total_reqs > 0:
            print "Reqs measured: ", total_reqs, "| Avg Latency: ", cummulative / total_reqs
        else:
            print "No reqs measured"
    def percentileLatency(self, array, percentage):
        """Print the latency bucket at the given percentile.

        A bucket value of -1 denotes the open-ended overflow bucket, shown
        as '> <previous bucket>'.  percentage == 100 scans from the top for
        the highest non-empty bucket instead.
        """
        total_reqs = 0
        sum_reqs = 0
        for x in xrange(len(self.latency_values)):
            #cummulative = self.latency_values[x] * array[x] + cummulative
            total_reqs += array[x]
        if total_reqs > 0:
            if percentage == 100:
                for x in reversed(xrange(len(self.latency_values))):
                    if array[x] > 0:
                        if self.latency_values[x] == -1:
                            print percentage, "%: >", self.latency_values[x-1], "us"
                        else:
                            print percentage, "%: ", self.latency_values[x], "us"
                        return
            else:
                for x in xrange(len(self.latency_values)):
                    sum_reqs += array[x]
                    if ((100.0 * sum_reqs) / total_reqs) >= percentage:
                        if self.latency_values[x] == -1:
                            print percentage, "%: >", self.latency_values[x-1], "us"
                        else:
                            print percentage, "% : ", self.latency_values[x], "us"
                        return
        else:
            print "No reqs measured"
    def parseInputStats(self):
        """Read histogram records from stdin into the parallel arrays.

        Assumes each 'writes' line follows the 'reads' line of the same
        bucket (lr_lines tracks the bucket index for all_reqs).
        """
        lr_lines = 0
        for line in sys.stdin: # input from standard input
            if line[0] == '#':
                continue
            (command, words) = line.strip().split(":",1)
            command = command.strip()
            if command == 'reads':
                words = words.strip().split(",")
                #if int(words[0].strip()) != -1:
                self.latency_values.append(int(words[0].strip()))
                self.reads.append(int(words[1].strip()))
                self.all_reqs.append(int(words[1].strip()))
            elif command == 'writes':
                words = words.strip().split(",")
                self.writes.append(int(words[1].strip()))
                self.all_reqs[lr_lines] = self.all_reqs[lr_lines] + self.writes[-1]
                lr_lines = lr_lines + 1
            elif command == 'reads-hl':
                words = words.strip().split(",")
                self.max_read_latency = int(words[0].strip())
            elif command == 'writes-hl':
                words = words.strip().split(",")
                self.max_write_latency = int(words[0].strip())
if __name__ == '__main__':
    # Construction parses stdin and prints all statistics as a side effect.
    LatencyParser()
| 2.578125 | 3 |
preferences/migrations/0003_auto_20181223_1440.py | nanoy42/coope | 3 | 12758512 | # Generated by Django 2.1 on 2018-12-23 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the lost_pintes_allowed counter (default 0) to GeneralPreferences
    # and to its HistoricalGeneralPreferences mirror table.

    dependencies = [
        ('preferences', '0002_auto_20181221_2151'),
    ]
    operations = [
        migrations.AddField(
            model_name='generalpreferences',
            name='lost_pintes_allowed',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='historicalgeneralpreferences',
            name='lost_pintes_allowed',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
| 1.523438 | 2 |
backend/batch_api.py | HendrikStrobelt/LMdiff | 22 | 12758513 | <filename>backend/batch_api.py
import numpy as np
import torch
from torch.nn import functional as F
from transformers import (
AutoTokenizer,
AutoModelWithLMHead,
PreTrainedModel,
PreTrainedTokenizer,
GPT2Tokenizer, GPT2LMHeadModel)
from typing import *
class ModelManager:
    """Loads HuggingFace language models + tokenizers and caches them by name."""

    def __init__(self):
        super().__init__()
        # Prefer GPU when available; every loaded model is moved there.
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.models = {}
        self.tokenizers = {}

    def get_model_and_tokenizer(self, model_name: str):
        """Return ``(model, tokenizer)`` for *model_name*, loading on first use.

        Results are cached so repeated requests do not re-instantiate the
        model.  (The original skipped the cache entirely for 'arxiv'
        fine-tuned GPT-2 checkpoints, reloading them on every call.)
        """
        model = self.models.get(model_name)
        tokenizer = self.tokenizers.get(model_name)
        if model is not None and tokenizer is not None:
            return model, tokenizer
        if "arxiv" in model_name:
            # arxiv fine-tuned checkpoints are plain GPT-2 models/tokenizers.
            tokenizer = GPT2Tokenizer.from_pretrained(model_name)
            model = GPT2LMHeadModel.from_pretrained(model_name).to(self.device)
        else:
            model = AutoModelWithLMHead.from_pretrained(model_name).to(self.device)
            tokenizer = AutoTokenizer.from_pretrained(model_name)
        print(f"Model is using {self.device}")
        self.models[model_name] = model
        self.tokenizers[model_name] = tokenizer
        return model, tokenizer
def format_attn(attention_tuples: tuple):
    """
    Collapse per-layer attention tensors into a single tensor.

    Input: N tuples (N = layer num); each item is a tensor of shape
    batch x num_heads x from x to (with batch size 1).
    Output: tensor of shape layer x from x to, averaged over heads.
    """
    # Concatenate the layer tensors along the (singleton) batch axis,
    # then average away the heads dimension.
    stacked = torch.cat(list(attention_tuples), dim=0)
    return stacked.mean(dim=1)
class LMComparer:
    """Compares two language models that share one tokenization scheme:
    per-token probabilities and ranks, top-k predictions, attention maps,
    and a divergence between their output distributions."""
    def __init__(
        self,
        m1: PreTrainedModel,
        m2: PreTrainedModel,
        t1: PreTrainedTokenizer,
        t2: PreTrainedTokenizer,
    ):
        super().__init__()
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        # Both models are used for inference only.
        self.m1 = m1
        self.m1.eval()
        self.m2 = m2
        self.m2.eval()
        self.tokenizer = t1
        self.bos_token_id = self.tokenizer.bos_token_id
        self.tokenizer.pad_token = self.tokenizer.eos_token
        # Check that both use same tokenizer
        assert type(self.tokenizer) == type(
            t2
        ), "Please use models with same tokenization scheme"
    def get_rank_prob_topk( self, y: torch.Tensor, probs: torch.Tensor, k: int = 5):
        """
        Args:
            y: IDs of the tokenized input (no generation token at the beginning)
            probs: Probabilities of every token in the vocabulary at that position
            tokenizer: Tokenizer that generated the probabilities
            k: how many top tokens to report
        Returns:
            Payload containing information needed for diffing language models
        """
        # Vocabulary sorted by logits
        top_voc = torch.argsort(probs, descending=True)
        # Returning `as_tuple=True` allows indexing with output
        yrank_idx = torch.eq(y.unsqueeze(-1), top_voc).nonzero(as_tuple=True)
        # Assigning ranks to each input_id
        yranks = torch.zeros_like(y)
        yranks[yrank_idx[:2]] = yrank_idx[-1]
        # Probabilities of actual inputs
        yrank_idx_og = (yrank_idx[0], yrank_idx[1], top_voc[yrank_idx])
        yprobs = probs[yrank_idx_og].view(y.shape) # TODO: CHECK that reshape is correctly done
        topk = top_voc[:, :, :k]
        # I expect this list comprehension to be pretty slow. Should maybe do once at the end?
        topk_words = [[self.tokenizer.convert_ids_to_tokens(preds) for preds in sentence] for sentence in topk]
        return yranks, yprobs, topk_words
    def batch_forward(self, text: List[str], k: int = 7):
        """Batched processing of all the information needed to analyze a language model
        Args:
            text: Sentence batch to analyze
            k: How many predictions we care to analyze
        Returns:
            Payload containing information needed to diff models
        """
        encoded = self.tokenizer.batch_encode_plus(text, pad_to_max_length=True, return_tensors="pt")
        ids = encoded["input_ids"]
        start_token = self.bos_token_id
        start_tokens = (torch.ones(ids.shape[0], dtype=torch.int64) * start_token).view((-1, 1))
        start_1s = torch.ones((ids.shape[0], 1), dtype=torch.int64)
        gen_ids = torch.cat((start_tokens, ids), dim=1)
        # Start all inputs with GPT2's EOS token
        encoded['input_ids'] = gen_ids
        # Allow attention to EOS token
        encoded['attention_mask'] = torch.cat((start_1s, encoded['attention_mask']), dim=1)
        m1_logits, m1_embeds, atts1 = self.m1(**encoded, output_attentions=True)
        m2_logits, m2_embeds, atts2 = self.m2(**encoded, output_attentions=True)
        attn1 = format_attn(atts1)
        attn2 = format_attn(atts2)
        # Drop the last position so each remaining position's distribution
        # lines up with the next input token (ids carries no leading BOS).
        probs1 = F.softmax(m1_logits[:, :-1], dim=-1)
        probs2 = F.softmax(m2_logits[:, :-1], dim=-1)
        assert probs1.shape == probs2.shape, "Vocab sizes not the same"
        ranks1, probs1, topk_words1 = self.get_rank_prob_topk(ids, probs1, k)
        ranks2, probs2, topk_words2 = self.get_rank_prob_topk(ids, probs2, k)
        rank_diff = ranks2 - ranks1
        probs_diff = probs2 - probs1
        # Attention maps can only be diffed when layer/head shapes match.
        attn_diff = attn2 - attn1 if attn1.shape == attn2.shape else None
        # NOTE(review): F.kl_div expects its first argument in log space,
        # but probs1 was rebound above to raw per-token probabilities —
        # confirm whether this is intended.
        kl = F.kl_div(probs1, probs2, reduction="none") # Elementwise KL Div
        return {
            "prob": {
                "m1": probs1,
                "m2": probs2,
                "diff": probs_diff
            },
            "rank": {
                "m1": ranks1,
                "m2": ranks2,
                "diff": rank_diff
            },
            "topk": {
                "m1": topk_words1,
                "m2": topk_words2
            },
            "attn": {
                "m1": attn1,
                "m2": attn2,
                "diff": attn_diff
            },
            "kl": kl,
            "ids": ids,
            "tokens": [self.tokenizer.convert_ids_to_tokens(id) for id in ids],
            "text": text,
            "attention_mask": encoded['attention_mask']
        }
    def __call__(self, text: Union[List[str], str], k: int=7):
        """Handle single inputs or batched inputs"""
        if type(text) == str:
            return self.batch_forward([text], k)
        return self.batch_forward(text)
if __name__ == "__main__":
    # Smoke test: load two tokenizer-compatible GPT-2 variants and run the
    # comparer on a single sentence and on a batch.
    mm = ModelManager()
    m1, t1 = mm.get_model_and_tokenizer("gpt2")
    m2, t2 = mm.get_model_and_tokenizer("distilgpt2")
    # Example of how to run comparison of models
    comparer = LMComparer(m1, m2, t1, t2)
    print("loading successful!")
    comparer("this is a test of a single sentence!")
    comparer(["this is a test!", "and this is yet another test for the books!", "yeah dude"])
    print("checking successful!")
| 2.59375 | 3 |
python/network/Foundations-of-Python-Network-Programming/foundations-of-python-network-programming/foundations-of-python-network-programming/python2/06/sslclient.py | bosserbosser/codetest | 1 | 12758514 | #!/usr/bin/env python
# Foundations of Python Network Programming - Chapter 6 - sslclient.py
# Using SSL to protect a socket in Python 2.6 or later
import os, socket, ssl, sys
from backports.ssl_match_hostname import match_hostname, CertificateError
# Expect exactly one argument: the hostname to contact on port 443.
try:
    script_name, hostname = sys.argv
except ValueError:
    print >>sys.stderr, 'usage: sslclient.py <hostname>'
    sys.exit(2)
# First we connect, as usual, with a socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 443))
# Next, we turn the socket over to the SSL library!
# NOTE(review): PROTOCOL_SSLv3 is long obsolete and insecure (POODLE);
# modern code should negotiate TLS — do not reuse this sample as-is.
ca_certs_path = os.path.join(os.path.dirname(script_name), 'certfiles.crt')
sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv3,
                          cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_certs_path)
# Does the certificate that the server proffered *really* match the
# hostname to which we are trying to connect?  We need to check.
try:
    match_hostname(sslsock.getpeercert(), hostname)
except CertificateError, ce:
    print 'Certificate error:', str(ce)
    sys.exit(1)
# From here on, our `sslsock` works like a normal socket.  We can, for
# example, make an impromptu HTTP call.
sslsock.sendall('GET / HTTP/1.0\r\n\r\n')
result = sslsock.makefile().read() # quick way to read until EOF
sslsock.close()
print 'The document https://%s/ is %d bytes long' % (hostname, len(result))
| 3.4375 | 3 |
examples/tutorial/tutorial_6_neighbor.py | infobloxopen/infoblox_netmri | 12 | 12758515 | <filename>examples/tutorial/tutorial_6_neighbor.py
import argparse

from infoblox_netmri.client import InfobloxNetMRI

parser = argparse.ArgumentParser(description='run jobs for specific devices')
parser.add_argument('device_name', help="script name")
args = parser.parse_args()

defaults = {
    "host": "1.2.3.4",
    "username": "your_username",
    "password": "<PASSWORD>",
}

client = InfobloxNetMRI(
    defaults.get("host"),
    defaults.get("username"),
    defaults.get("password"),
)

devices_broker = client.get_broker('Device')

# Look up the requested device; index() returns a list of matches.
device = devices_broker.index(
    DeviceName=args.device_name,
    select=['DeviceID', 'DeviceName']
)[0]
print(device.DeviceName)

# Neighbor relationships where our device is the source.
source_relations = client.get_broker('Neighbor').index(
    DeviceID=device.DeviceID,
    select=['NeighborDeviceID']
)

# Neighbor relationships where our device is the destination.
destination_relations = client.get_broker('Neighbor').index(
    NeighborDeviceID=device.DeviceID,
    select=['DeviceID', ]
)


def _print_neighbor(device_id):
    """Fetch one neighbor by DeviceID and print its type and name."""
    # index() returns a list (see the lookup above); take the first match.
    # The original accessed attributes on the list itself, which would fail.
    neighbor_device = devices_broker.index(
        DeviceID=device_id,
        select=['DeviceID', 'DeviceName', 'DeviceType']
    )[0]
    print(" -> {} {}\n".format(neighbor_device.DeviceType, neighbor_device.DeviceName))


for s_id in [x.NeighborDeviceID for x in source_relations]:
    _print_neighbor(s_id)

# BUG FIX: the original looped over the destination IDs as d_id but queried
# with the stale s_id left over from the previous loop, so it re-printed the
# last source neighbor instead of the destination neighbors.
for d_id in [x.DeviceID for x in destination_relations]:
    _print_neighbor(d_id)
| 2.765625 | 3 |
src/jobResub_lxplus.py | GilesStrong/Delphes_Event_Selection_YR-2018 | 0 | 12758516 | import os, glob
failures = [x[:x.rfind("/")] for x in glob.glob("*/STDOUT") if "Data saved" not in open(x).read()]
print len(failures), failures | 2.53125 | 3 |
test/modules/ravestate_nlp/test_triple.py | ro-boy/ravestate | 0 | 12758517 | <gh_stars>0
import logging
import spacy
import pytest
from ravestate_nlp import Triple
from testfixtures import LogCapture
# Load the small English spaCy model once at import time; used by create_token.
nlp = spacy.load('en_core_web_sm')
def create_token(text: str):
    """Parse *text* with spaCy and return its first token (None for falsy input)."""
    if not text:
        return None
    return nlp(text)[0]
def create_triple(subject: str = None, predicate: str = None, object: str = None):
    """Build a Triple from raw strings; any part may be None/empty."""
    s = create_token(subject)
    p = create_token(predicate)
    o = create_token(object)
    return Triple(subject=s, predicate=p, object=o)
@pytest.fixture
def test_token():
    # A single spaCy token for tests needing a bare token.
    return create_token('test')
@pytest.fixture
def test_triple1():
    # A fully-populated triple; same content as test_triple2/test_triple3.
    return create_triple('subject', 'predicate', 'test')
@pytest.fixture
def test_triple2():
    # Same content as test_triple1/test_triple3.
    return create_triple('subject', 'predicate', 'test')
@pytest.fixture
def test_triple3():
    # Same content as test_triple1/test_triple2.
    return create_triple('subject', 'predicate', 'test')
@pytest.fixture
def full_triple():
    # A triple with all three parts distinct; object differs from the
    # test_triple* fixtures.
    return create_triple('subject', 'predicate', 'object')
def test_comparison(full_triple):
    # A triple compares equal to itself.
    assert full_triple == full_triple
@pytest.mark.parametrize('compared_triple',
                         [create_triple('subject', 'predicate', 'test'),
                          create_triple('subject', 'test', 'object'),
                          create_triple('test', 'predicate', 'test')]
                         )
def test_comparison_negative(full_triple, compared_triple):
    # Triples differing in any slot compare unequal.
    assert full_triple != compared_triple
def test_comparison_tuple(full_triple):
    # A triple compares equal to its own tuple form.
    assert full_triple == full_triple.to_tuple()
@pytest.mark.parametrize('compared_tuple',
                         [create_triple('subject', 'predicate', 'test').to_tuple(),
                          create_triple('subject', 'test', 'object').to_tuple(),
                          create_triple('test', 'predicate', 'test').to_tuple()]
                         )
def test_comparison_negative_tuple(full_triple, compared_tuple):
    # A triple compares unequal to tuple forms of different triples.
    assert full_triple != compared_tuple
def test_comparison_wrong_type(full_triple):
    # Comparing against an unrelated type yields "not equal" rather than raising.
    assert full_triple != ''
@pytest.mark.parametrize('triple, expected_log',
                         [(create_triple('subject', 'predicate', 'object'), f'subject:predicate:object'),
                          (create_triple('subject', 'predicate', None), f'subject:predicate:'),
                          (create_triple('subject', None, 'object'), f'subject::object'),
                          (create_triple(None, 'predicate', 'object'), f':predicate:object'),
                          (create_triple('subject', None, None), f'subject::'),
                          (create_triple(None, 'predicate', None), f':predicate:'),
                          (create_triple(None, None, 'object'), f'::object')]
                         )
def test_print(triple, expected_log):
    # str(Triple) renders as "subject:predicate:object" with empty slots blank;
    # verified via the logging output captured by testfixtures.
    with LogCapture() as log_capture:
        logging.info(triple)
    log_capture.check(('root', 'INFO', expected_log,))
@pytest.mark.parametrize('triple, expected_log',
                         [(create_triple('subject', 'predicate', 'object'), f'<Triple object subject:predicate:object>'),
                          (create_triple('subject', 'predicate', None), f'<Triple object subject:predicate:>'),
                          (create_triple('subject', None, 'object'), f'<Triple object subject::object>'),
                          (create_triple(None, 'predicate', 'object'), f'<Triple object :predicate:object>'),
                          (create_triple('subject', None, None), f'<Triple object subject::>'),
                          (create_triple(None, 'predicate', None), f'<Triple object :predicate:>'),
                          (create_triple(None, None, 'object'), f'<Triple object ::object>')]
                         )
def test_repr(triple, expected_log):
    # repr(Triple) wraps the str form as "<Triple object ...>"; checked by
    # logging a list so the repr (not str) path is exercised.
    with LogCapture() as log_capture:
        logging.info([triple])
    log_capture.check(('root', 'INFO', f'[{expected_log}]',))
| 2.15625 | 2 |
Curso/POO/Persona.py | jsalmoralp/Python-Proyecto-Apuntes | 0 | 12758518 | <filename>Curso/POO/Persona.py
# Apartado 24 (Clases)
"""
¿En qué consiste la Programación Orientada a Objetos (POO)?
- En trasladar la naturaleza de los objetos de la vida real a código
de programación (en algún lenguaje de programación, como Python).
Los objetos de la realidad tienen características (atributos o propiedades)
y funcionalidades o comportamientos ( funciones o métodos).
Ventajas:
- Modularización ( división en pequeñas partes) de un programa completo.
- Código fuente muy reutilizable.
- Código fuente más fácil de incrementar en el futuro y de mantener.
- Si existe un fallo en una pequeña parte del código el programa completo
no debe fallar necesariamente. Además, es más fácil de corregir esos fallos.
- Encapsulamiento: Ocultamiento del funcionamiento interno de un objeto.
"""
class Persona:
    """A person with basic attributes and one behaviour (waking up).

    Generalized so instances can be constructed with initial values
    while remaining backward compatible with ``Persona()``.
    """

    # Class-level defaults kept for backward compatibility with any code
    # that reads these attributes from the class itself.
    apellidos = ""
    nombre = ""
    edad = 0
    despierta = False

    def __init__(self, apellidos="", nombre="", edad=0):
        """Create a person; all fields are optional and default to the
        original class-level values, so ``Persona()`` still works."""
        self.apellidos = apellidos
        self.nombre = nombre
        self.edad = edad
        # A freshly created person always starts asleep.
        self.despierta = False

    def despertar(self):
        """Wake this person up and print a greeting."""
        # `self` refers to the instance the method is called on.
        self.despierta = True
        print("Buen día.")
# Demo: two independent instances; mutating one does not affect the other.
persona1 = Persona()
persona1.apellidos = "<NAME>"
print(persona1.apellidos)
persona1.despertar()
print(persona1.despierta)
# The second person was never woken up, so `despierta` stays False.
persona2 = Persona()
persona2.apellidos = "<NAME>"
print(persona2.apellidos)
print(persona2.despierta)
| 3.6875 | 4 |
setup.py | wallowind/classification-of-depression-by-EEG-signals-using-neural-networks | 4 | 12758519 | <gh_stars>1-10
from setuptools import setup, find_packages
# Distribution metadata for the `cdenn` package: a collection of neural
# networks for classifying depression from an open EEG dataset.
setup(
    name="cdenn",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    packages=["cdenn", "cdenn.lib"],
    url="https://github.com/wallowind/classification-of-depression-by-EEG-signals-using-neural-networks",
    description="A collection of neural networks for the classification of an open EEG dataset in depression.",
    license="MIT",
    # NOTE(review): torch is pinned exactly while the others are lower
    # bounds — presumably intentional; confirm before relaxing.
    install_requires=["torch == 1.4.0",
                      "mne >= 0.22.1",
                      "numpy >= 1.19.1",
                      "tqdm >= 4.48.0"]
)
| 1.484375 | 1 |
examples/hacker_news/hacker_news/jobs/hacker_news_api_download.py | Jiafi/dagster | 0 | 12758520 | <reponame>Jiafi/dagster<gh_stars>0
import os
from datetime import datetime
from dagster import ResourceDefinition, graph, hourly_partitioned_config
from dagster_aws.s3 import s3_pickle_io_manager, s3_resource
from dagster_pyspark import pyspark_resource
from hacker_news.ops.download_items import build_comments, build_stories, download_items
from hacker_news.ops.id_range_for_time import id_range_for_time
from hacker_news.resources.hn_resource import hn_api_subsample_client
from hacker_news.resources.parquet_io_manager import partitioned_parquet_io_manager
from hacker_news.resources.snowflake_io_manager import time_partitioned_snowflake_io_manager
# the configuration we'll need to make our Snowflake-based IOManager work
SNOWFLAKE_CONF = {
"account": os.getenv("SNOWFLAKE_ACCOUNT", ""),
"user": os.getenv("SNOWFLAKE_USER", ""),
"password": os.getenv("SNOWFLAKE_PASSWORD", ""),
"database": "DEMO_DB",
"warehouse": "TINY_WAREHOUSE",
}
# the configuration we'll need to make spark able to read from / write to s3
configured_pyspark = pyspark_resource.configured(
{
"spark_conf": {
"spark.jars.packages": ",".join(
[
"net.snowflake:snowflake-jdbc:3.8.0",
"net.snowflake:spark-snowflake_2.12:2.8.2-spark_3.0",
"com.amazonaws:aws-java-sdk:1.7.4,org.apache.hadoop:hadoop-aws:2.7.7",
]
),
"spark.hadoop.fs.s3.impl": "org.apache.hadoop.fs.s3native.NativeS3FileSystem",
"spark.hadoop.fs.s3.awsAccessKeyId": os.getenv("AWS_ACCESS_KEY_ID", ""),
"spark.hadoop.fs.s3.awsSecretAccessKey": os.getenv("AWS_SECRET_ACCESS_KEY", ""),
"spark.hadoop.fs.s3.buffer.dir": "/tmp",
}
}
)
DOWNLOAD_RESOURCES_STAGING = {
"io_manager": s3_pickle_io_manager.configured({"s3_bucket": "hackernews-elementl-dev"}),
"s3": s3_resource,
"partition_start": ResourceDefinition.string_resource(),
"partition_end": ResourceDefinition.string_resource(),
"parquet_io_manager": partitioned_parquet_io_manager.configured(
{"base_path": "s3://hackernews-elementl-dev"}
),
"warehouse_io_manager": time_partitioned_snowflake_io_manager.configured(SNOWFLAKE_CONF),
"pyspark": configured_pyspark,
"hn_client": hn_api_subsample_client.configured({"sample_rate": 10}),
}
DOWNLOAD_RESOURCES_PROD = {
"io_manager": s3_pickle_io_manager.configured({"s3_bucket": "hackernews-elementl-prod"}),
"s3": s3_resource,
"partition_start": ResourceDefinition.string_resource(),
"partition_end": ResourceDefinition.string_resource(),
"parquet_io_manager": partitioned_parquet_io_manager.configured(
{"base_path": "s3://hackernews-elementl-prod"}
),
"warehouse_io_manager": time_partitioned_snowflake_io_manager.configured(SNOWFLAKE_CONF),
"pyspark": configured_pyspark,
"hn_client": hn_api_subsample_client.configured({"sample_rate": 10}),
}
DEFAULT_PARTITION_RESOURCE_CONFIG = {
"partition_start": {"config": "2020-12-30 00:00:00"},
"partition_end": {"config": "2020-12-30 01:00:00"},
}
DOWNLOAD_TAGS = {
"dagster-k8s/config": {
"container_config": {
"resources": {
"requests": {"cpu": "500m", "memory": "2Gi"},
}
},
}
}
@graph(
    description="#### Owners:\n"
    "<EMAIL>, <EMAIL>\n "
    "#### About\n"
    "Downloads all items from the HN API for a given day, "
    "splits the items into stories and comment types using Spark, and uploads filtered items to "
    "the corresponding stories or comments Snowflake table",
)
def hacker_news_api_download():
    # Fetch the raw items for the partition's id range, then fan out into
    # the two typed outputs (comments and stories).
    items = download_items(id_range_for_time())
    build_comments(items)
    build_stories(items)
@hourly_partitioned_config(start_date=datetime(2021, 1, 1))
def hourly_download_config(start: datetime, end: datetime):
    """Map an hourly partition window onto the partition_start/partition_end
    string resources expected by the download graph."""
    time_format = "%Y-%m-%d %H:%M:%S"
    window = {
        "partition_start": {"config": start.strftime(time_format)},
        "partition_end": {"config": end.strftime(time_format)},
    }
    return {"resources": window}
# Production deployment of the graph: prod resource set, hourly partitions.
download_prod_job = hacker_news_api_download.to_job(
    resource_defs=DOWNLOAD_RESOURCES_PROD,
    tags=DOWNLOAD_TAGS,
    config=hourly_download_config,
)
# Staging counterpart; identical apart from the staging resource set.
download_staging_job = hacker_news_api_download.to_job(
    resource_defs=DOWNLOAD_RESOURCES_STAGING,
    tags=DOWNLOAD_TAGS,
    config=hourly_download_config,
)
| 1.914063 | 2 |
deepnet/sparse_code_layer.py | airingzhang/deepnet | 626 | 12758521 | from layer import *
class SparseCodeLayer(Layer):
  """Layer used in sparse-coding models.

  Extends ``Layer`` with extra GPU buffers (approximator, gradient,
  an m-by-m workspace) and with activation / derivative rules for the
  unit types defined in ``deepnet_pb2.Hyperparams``.
  """
  def AllocateBatchsizeDependentMemory(self, batchsize):
    # Allocate GPU buffers whose shape follows self.state (set by the base
    # class for the given batch size).
    super(SparseCodeLayer, self).AllocateBatchsizeDependentMemory(batchsize)
    self.approximator = cm.empty(self.state.shape)
    self.temp3 = cm.empty(self.state.shape)
    self.grad = cm.empty(self.state.shape)
    # Per-unit gradient scaling column vector, initialised to zeros.
    self.grad_scale = cm.CUDAMatrix(np.zeros((self.state.shape[0], 1)))
    # Square workspace of size (num_units x num_units).
    self.m_by_m = cm.empty((self.state.shape[0], self.state.shape[0]))

  def ApplyActivation(self, state):
    """Apply this layer's activation function to `state` in place."""
    if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:
      cm.sigmoid(state)
    elif self.activation == deepnet_pb2.Hyperparams.TANH:
      cm.tanh(state)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:
      # ReLU: build a 0/1 mask of positive entries, then zero the rest.
      state.greater_than(0, target=self.temp)
      state.mult(self.temp)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:
      # Softplus: log(1 + exp(x)).
      cm.log_1_plus_exp(state)
    elif self.activation == deepnet_pb2.Hyperparams.LINEAR:
      pass

  def ComputeDeriv(self, state):
    """Compute derivative w.r.t input given derivative w.r.t output."""
    # NOTE(review): the dropout mask is applied only in the TANH and
    # LINEAR branches below — confirm the other activations are meant to
    # skip it.
    if self.activation == deepnet_pb2.Hyperparams.LOGISTIC:
      self.deriv.apply_logistic_deriv(state)
    elif self.activation == deepnet_pb2.Hyperparams.TANH:
      self.deriv.apply_tanh_deriv(state)
      if self.hyperparams.dropout:
        self.deriv.mult(self.mask)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR:
      self.deriv.apply_rectified_linear_deriv(state)
    elif self.activation == deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH:
      self.deriv.apply_rectified_linear_smooth_deriv(state)
    elif self.activation == deepnet_pb2.Hyperparams.LINEAR:
      if self.hyperparams.dropout:
        self.deriv.mult(self.mask)
    elif self.activation == deepnet_pb2.Hyperparams.SOFTMAX:
      raise Exception('Not implemented.')
    else:
      raise Exception('Unknown activation.')
| 2.34375 | 2 |
2020/day_03.py | lbreede/adventofcode | 2 | 12758522 | # --- Day 3: Toboggan Trajectory ---
line_list = [line.rstrip("\n") for line in open("input.txt")]
def slopecheck(hori, vert):
    """Count trees ('#') hit while descending the grid with slope
    (hori right, vert down); rows wrap horizontally."""
    found = 0
    pos = 0
    for row, line in enumerate(line_list):
        # Only every `vert`-th row is visited.
        if row % vert != 0:
            continue
        if line[pos % len(line)] == "#":
            found += 1
        pos += hori
    return found
# Part 2: the answer is the product of tree counts over these five slopes.
a = slopecheck(1, 1)
b = slopecheck(3, 1)
c = slopecheck(5, 1)
d = slopecheck(7, 1)
e = slopecheck(1, 2)
"""
print(a)
print(b)
print(c)
print(d)
print(e)
print(a*b*c*d*e)
"""
#093_Cadastro_de_jogadores.py
# Player registration: read a player's name, number of matches and the
# goals scored per match, then print a summary report.
jogador = {}
gols = []
totgols = 0
print("")
jogador['Nome'] = str(input("Nome: "))
jogador['Partidas Jogadas'] = int(input("Quantidades de partidas: "))
# Collect the goals for each match, accumulating the total.
for i in range(0,jogador['Partidas Jogadas']):
    g = int(input(f" Quantidades de gols na {i+1}º partida: "))
    gols.append(g)
    totgols += g
# Store a copy of the goal list so later mutations of `gols` can't
# affect the dictionary entry.
jogador['Gols'] = gols[:]
jogador['Total de Gols'] = totgols # or sum(gols)
print("")
print(jogador)
print("")
# Dump the dictionary key by key.
for k, v in jogador.items():
    print(f"{k}: {v}")
print("")
# Per-match breakdown followed by the total.
print(f"O jogador {jogador['Nome']} jogou {len(jogador['Gols'])} partidas")
for i, v in enumerate(jogador['Gols']):
    print(f" -> na {i+1}º partida fez {v} gols")
print(f" No total de {totgols} gols")
| 3.65625 | 4 |
pygen_structures/test/test_command_line.py | avanteijlingen/pygen-structures | 7 | 12758524 | import os
import sys
import io
import warnings
from pygen_structures.convenience_functions import (
load_charmm_dir,
pdb_to_mol
)
from pygen_structures import __main__ as cmd_interface
FILE_DIR, _ = os.path.split(__file__)
TEST_TOPPAR = os.path.join(FILE_DIR, 'test_toppar')
def test_arg_parsing():
    """parse_args maps CLI options (and their defaults) onto the
    expected namespace attributes."""
    cases = [
        (
            ["HEY", "-o", "HEY_out", "--histidine", "HSP"],
            {
                "sequence": "HEY",
                "segid": "PROT",
                "patches": None,
                "toppar": None,
                "verify": True,
                "output": "HEY_out",
                "histidine": "HSP",
                "use_charmm_names": False,
            },
        ),
        (
            [
                "-u", "HSE-TRP-LYS", "-o", "HWK", "--patches", "CT2", "LAST",
                "-v", "--segid", "HWK",
            ],
            {
                "sequence": "HSE-TRP-LYS",
                "segid": "HWK",
                "patches": ["CT2", "LAST"],
                "toppar": None,
                "verify": False,
                "output": "HWK",
                "histidine": "HSE",
                "use_charmm_names": True,
            },
        ),
    ]
    for argv, expected in cases:
        args = cmd_interface.parse_args(argv)
        for attr, value in expected.items():
            assert getattr(args, attr) == value
def test_molecule_creation_raff():
    """End-to-end build of raffinose and a structural check of the PDB.

    Runs the CLI to generate RAFF.psf/RAFF.pdb, reloads the PDB with the
    RAFF patch applied, then verifies name, segment, force-field
    parameters and the exact atom/bond connectivity.
    """
    argv = [
        "-u", "AGLC-BFRU-AGAL", "-o", "RAFF",
        "--patches", "RAFF", "0", "1", "2",
        "--segid", "RAFF", "--name", "Raffinose"
    ]
    cmd_interface.main(argv)
    assert(os.path.exists("RAFF.psf"))
    os.remove('RAFF.psf')
    assert(os.path.exists("RAFF.pdb"))
    rtf, prm = load_charmm_dir()
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        molecule = pdb_to_mol("RAFF.pdb", rtf, patches={"RAFF": (0, 1, 2)})
    os.remove('RAFF.pdb')
    assert(molecule.name == "Raffinose")
    assert(molecule.segment == "RAFF")
    assert(molecule.check_parameters(prm))
    # Expected atoms as (residue_index, atom_name); residues 0-2
    # correspond to the AGLC-BFRU-AGAL sequence passed on the CLI.
    ref_atoms = {
        (0, 'C1'),
        (0, 'H1'),
        (0, 'O1'),
        (0, 'C5'),
        (0, 'H5'),
        (0, 'O5'),
        (0, 'C2'),
        (0, 'H2'),
        (0, 'O2'),
        (0, 'HO2'),
        (0, 'C3'),
        (0, 'H3'),
        (0, 'O3'),
        (0, 'HO3'),
        (0, 'C4'),
        (0, 'H4'),
        (0, 'O4'),
        (0, 'HO4'),
        (0, 'C6'),
        (0, 'H61'),
        (0, 'H62'),
        (1, 'O5'),
        (1, 'C2'),
        (1, 'C5'),
        (1, 'H5'),
        (1, 'C6'),
        (1, 'H61'),
        (1, 'H62'),
        (1, 'O6'),
        (1, 'HO6'),
        (1, 'C1'),
        (1, 'H11'),
        (1, 'H12'),
        (1, 'O1'),
        (1, 'HO1'),
        (1, 'C3'),
        (1, 'H3'),
        (1, 'O3'),
        (1, 'HO3'),
        (1, 'C4'),
        (1, 'H4'),
        (1, 'O4'),
        (1, 'HO4'),
        (2, 'C1'),
        (2, 'H1'),
        (2, 'O1'),
        (2, 'C5'),
        (2, 'H5'),
        (2, 'O5'),
        (2, 'C2'),
        (2, 'H2'),
        (2, 'O2'),
        (2, 'HO2'),
        (2, 'C3'),
        (2, 'H3'),
        (2, 'O3'),
        (2, 'HO3'),
        (2, 'C4'),
        (2, 'H4'),
        (2, 'O4'),
        (2, 'HO4'),
        (2, 'C6'),
        (2, 'H61'),
        (2, 'H62'),
        (2, 'O6'),
        (2, 'HO6')
    }
    # Expected bonds as ((res, atom), (res, atom)) pairs, including the
    # inter-residue glycosidic linkages.
    ref_bonds = {
        ((0, 'C1'), (0, 'O1')),
        ((0, 'C1'), (0, 'H1')),
        ((0, 'C1'), (0, 'O5')),
        ((0, 'C1'), (0, 'C2')),
        ((0, 'C2'), (0, 'H2')),
        ((0, 'C2'), (0, 'O2')),
        ((0, 'O2'), (0, 'HO2')),
        ((0, 'C2'), (0, 'C3')),
        ((0, 'C3'), (0, 'H3')),
        ((0, 'C3'), (0, 'O3')),
        ((0, 'O3'), (0, 'HO3')),
        ((0, 'C3'), (0, 'C4')),
        ((0, 'C4'), (0, 'H4')),
        ((0, 'C4'), (0, 'O4')),
        ((0, 'O4'), (0, 'HO4')),
        ((0, 'C4'), (0, 'C5')),
        ((0, 'C5'), (0, 'H5')),
        ((0, 'C5'), (0, 'C6')),
        ((0, 'C6'), (0, 'H61')),
        ((0, 'C6'), (0, 'H62')),
        ((0, 'C5'), (0, 'O5')),
        ((0, 'O1'), (1, 'C2')),
        ((1, 'O5'), (1, 'C2')),
        ((1, 'C2'), (1, 'C1')),
        ((1, 'C2'), (1, 'C3')),
        ((1, 'C3'), (1, 'H3')),
        ((1, 'C3'), (1, 'O3')),
        ((1, 'O3'), (1, 'HO3')),
        ((1, 'C3'), (1, 'C4')),
        ((1, 'C4'), (1, 'H4')),
        ((1, 'C4'), (1, 'O4')),
        ((1, 'O4'), (1, 'HO4')),
        ((1, 'C4'), (1, 'C5')),
        ((1, 'C5'), (1, 'H5')),
        ((1, 'C5'), (1, 'C6')),
        ((1, 'C5'), (1, 'O5')),
        ((1, 'C6'), (1, 'H61')),
        ((1, 'C6'), (1, 'H62')),
        ((1, 'C6'), (1, 'O6')),
        ((1, 'O6'), (1, 'HO6')),
        ((1, 'C1'), (1, 'H11')),
        ((1, 'C1'), (1, 'H12')),
        ((1, 'C1'), (1, 'O1')),
        ((1, 'O1'), (1, 'HO1')),
        ((2, 'C1'), (2, 'O1')),
        ((2, 'C1'), (2, 'H1')),
        ((2, 'C1'), (2, 'O5')),
        ((2, 'C1'), (2, 'C2')),
        ((2, 'C2'), (2, 'H2')),
        ((2, 'C2'), (2, 'O2')),
        ((2, 'O2'), (2, 'HO2')),
        ((2, 'C2'), (2, 'C3')),
        ((2, 'C3'), (2, 'H3')),
        ((2, 'C3'), (2, 'O3')),
        ((2, 'O3'), (2, 'HO3')),
        ((2, 'C3'), (2, 'C4')),
        ((2, 'C4'), (2, 'H4')),
        ((2, 'C4'), (2, 'O4')),
        ((2, 'O4'), (2, 'HO4')),
        ((2, 'C4'), (2, 'C5')),
        ((2, 'C5'), (2, 'H5')),
        ((2, 'C5'), (2, 'C6')),
        ((2, 'C6'), (2, 'H61')),
        ((2, 'C6'), (2, 'H62')),
        ((2, 'C6'), (2, 'O6')),
        ((2, 'O6'), (2, 'HO6')),
        ((2, 'C5'), (2, 'O5')),
        ((2, 'O1'), (0, 'C6')),
    }
    # Residue numbers in the PDB are 1-based; shift to 0-based indices.
    atoms = set()
    for atom in molecule.atoms:
        atoms.add((atom.residue_number - 1, atom.atom_name))
    assert(atoms == ref_atoms)
    # Bonds may be stored in either orientation; normalise each one to
    # the reference orientation before comparing.
    bonds = set()
    for residue in molecule.residues:
        for bond in residue.bonds:
            if bond in ref_bonds:
                bonds.add(bond)
            else:
                bonds.add((bond[1], bond[0]))
    assert(bonds == ref_bonds)
def test_molecule_creation_hey():
    """Build the HEY tripeptide (protonated His) and round-trip the PDB."""
    psf_path, pdb_path = "HEY.psf", "HEY.pdb"
    cmd_interface.main(
        ["HEY", "-o", "HEY", "-t", TEST_TOPPAR, "--histidine", "HSP"]
    )
    assert os.path.exists(psf_path)
    os.remove(psf_path)
    assert os.path.exists(pdb_path)
    rtf, prm = load_charmm_dir()
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        molecule = pdb_to_mol(pdb_path, rtf)
    os.remove(pdb_path)
    # HSP renders as the charged histidine in the molecule name.
    assert molecule.name == "H[+]EY"
    assert molecule.segment == "PROT"
    assert molecule.check_parameters(prm)
def test_verify():
    """Verification gating: a sequence with missing parameters (PdP
    against the test toppar) must not produce files unless ``-v``
    disables verification; with the default toppar the run reports to
    stdout.

    Fixed to restore ``sys.stdout`` in a ``finally`` block — previously a
    failing assertion left stdout redirected (and the devnull handle
    open), corrupting output of every subsequent test.
    """
    old_stdout = sys.stdout
    try:
        # Silence output while exercising the failure path.
        sys.stdout = open(os.devnull, 'w')
        argv = [
            "PdP", "-o", "PdP", "-t", TEST_TOPPAR
        ]
        try:
            cmd_interface.main(argv)
        except SystemExit:
            # Missing parameters call exit()
            pass
        assert(not os.path.exists("PdP.psf"))
        assert(not os.path.exists("PdP.pdb"))
        # With verification disabled (-v) the files must be produced.
        argv = [
            "PdP", "-o", "PdP", "-t", TEST_TOPPAR, "-v"
        ]
        cmd_interface.main(argv)
        assert(os.path.exists("PdP.psf"))
        os.remove("PdP.psf")
        assert(os.path.exists("PdP.pdb"))
        os.remove("PdP.pdb")
        sys.stdout.close()
        # Capture the report a default-toppar run prints to stdout.
        sys.stdout = io.StringIO()
        argv = [
            "PdP"
        ]
        cmd_interface.main(argv)
        sys.stdout.seek(0)
        assert(sys.stdout.read() != "")
        sys.stdout.close()
    finally:
        # Always restore stdout and close any leftover redirect target,
        # even when an assertion above fails.
        redirected = sys.stdout
        sys.stdout = old_stdout
        if redirected is not old_stdout and not redirected.closed:
            redirected.close()
wagtail/admin/models.py | wlcrs/wagtail | 3 | 12758525 | # The edit_handlers module extends Page with some additional attributes required by
# wagtailadmin (namely, base_form_class and get_edit_handler). Importing this within
# wagtailadmin.models ensures that this happens in advance of running wagtailadmin's
# system checks.
from wagtail.admin import edit_handlers # NOQA
| 1.390625 | 1 |
fortlab/kgextra.py | grnydawn/fortlab | 0 | 12758526 | <filename>fortlab/kgextra.py<gh_stars>0
# kgen_extra.py
# Banner template written at the top of every KGEN-generated Fortran file;
# filled in with (filename, generation timestamp, KGEN version).  The
# string contents are emitted verbatim and must not be reformatted.
kgen_file_header = \
"""
! KGEN-generated Fortran source file
!
! Filename : %s
! Generated at: %s
! KGEN version: %s
"""
kgen_subprograms = \
"""FUNCTION kgen_get_newunit() RESULT(new_unit)
INTEGER, PARAMETER :: UNIT_MIN=100, UNIT_MAX=1000000
LOGICAL :: is_opened
INTEGER :: nunit, new_unit, counter
new_unit = -1
DO counter=UNIT_MIN, UNIT_MAX
inquire(UNIT=counter, OPENED=is_opened)
IF (.NOT. is_opened) THEN
new_unit = counter
EXIT
END IF
END DO
END FUNCTION
SUBROUTINE kgen_error_stop( msg )
IMPLICIT NONE
CHARACTER(LEN=*), INTENT(IN) :: msg
WRITE (*,*) msg
STOP 1
END SUBROUTINE """
kgen_print_counter = \
"""SUBROUTINE kgen_print_counter(counter)
INTEGER, INTENT(IN) :: counter
PRINT *, "KGEN writes input state variables at count = ", counter
END SUBROUTINE
SUBROUTINE kgen_print_mpirank_counter(rank, counter)
INTEGER, INTENT(IN) :: rank, counter
PRINT *, "KGEN writes input state variables at count = ", counter, " on mpirank = ", rank
END SUBROUTINE"""
kgen_verify_intrinsic_checkpart = \
"""check_status%%numTotal = check_status%%numTotal + 1
IF ( var %s ref_var ) THEN
check_status%%numIdentical = check_status%%numIdentical + 1
if(kgen_verboseLevel == 3) then
WRITE(*,*)
WRITE(*,*) trim(adjustl(varname)), " is IDENTICAL( ", var, " )."
endif
ELSE
if(kgen_verboseLevel > 0) then
WRITE(*,*)
WRITE(*,*) trim(adjustl(varname)), " is NOT IDENTICAL."
if(kgen_verboseLevel == 3) then
WRITE(*,*) "KERNEL: ", var
WRITE(*,*) "REF. : ", ref_var
end if
end if
check_status%%numOutTol = check_status%%numOutTol + 1
END IF"""
kgen_verify_numeric_array = \
"""check_status%%numTotal = check_status%%numTotal + 1
IF ( ALL( var %(eqtest)s ref_var ) ) THEN
check_status%%numIdentical = check_status%%numIdentical + 1
if(kgen_verboseLevel == 3) then
WRITE(*,*)
WRITE(*,*) "All elements of ", trim(adjustl(varname)), " are IDENTICAL."
!WRITE(*,*) "KERNEL: ", var
!WRITE(*,*) "REF. : ", ref_var
IF ( ALL( var == 0 ) ) THEN
if(kgen_verboseLevel == 3) then
WRITE(*,*) "All values are zero."
end if
END IF
end if
ELSE
allocate(temp(%(allocshape)s))
allocate(temp2(%(allocshape)s))
n = count(var/=ref_var)
where(abs(ref_var) > kgen_minvalue)
temp = ((var-ref_var)/ref_var)**2
temp2 = (var-ref_var)**2
elsewhere
temp = (var-ref_var)**2
temp2 = temp
endwhere
nrmsdiff = sqrt(sum(temp)/real(n))
rmsdiff = sqrt(sum(temp2)/real(n))
if (nrmsdiff > kgen_tolerance) then
check_status%%numOutTol = check_status%%numOutTol+1
else
check_status%%numInTol = check_status%%numInTol+1
endif
deallocate(temp,temp2)
END IF"""
kgen_verify_nonreal_array = \
"""check_status%%numTotal = check_status%%numTotal + 1
IF ( ALL( var %(eqtest)s ref_var ) ) THEN
check_status%%numIdentical = check_status%%numIdentical + 1
if(kgen_verboseLevel == 3) then
WRITE(*,*)
WRITE(*,*) "All elements of ", trim(adjustl(varname)), " are IDENTICAL."
!WRITE(*,*) "KERNEL: ", var
!WRITE(*,*) "REF. : ", ref_var
IF ( ALL( var == 0 ) ) THEN
WRITE(*,*) "All values are zero."
END IF
end if
ELSE
if(kgen_verboseLevel > 0) then
WRITE(*,*)
WRITE(*,*) trim(adjustl(varname)), " is NOT IDENTICAL."
WRITE(*,*) count( var /= ref_var), " of ", size( var ), " elements are different."
end if
check_status%%numOutTol = check_status%%numOutTol+1
END IF"""
kgen_utils_file_head = \
"""
INTEGER, PARAMETER :: kgen_dp = selected_real_kind(15, 307)
INTEGER, PARAMETER :: CHECK_IDENTICAL = 1
INTEGER, PARAMETER :: CHECK_IN_TOL = 2
INTEGER, PARAMETER :: CHECK_OUT_TOL = 3
REAL(kind=kgen_dp) :: kgen_tolerance = 1.0D-15, kgen_minvalue = 1.0D-15
INTEGER :: kgen_verboselevel = 1
interface kgen_tostr
module procedure kgen_tostr_args1
module procedure kgen_tostr_args2
module procedure kgen_tostr_args3
module procedure kgen_tostr_args4
module procedure kgen_tostr_args5
module procedure kgen_tostr_args6
end interface
! PERTURB: add following interface
interface kgen_perturb_real
module procedure kgen_perturb_real4_dim1
module procedure kgen_perturb_real4_dim2
module procedure kgen_perturb_real4_dim3
module procedure kgen_perturb_real8_dim1
module procedure kgen_perturb_real8_dim2
module procedure kgen_perturb_real8_dim3
end interface
type check_t
logical :: Passed
integer :: numOutTol
integer :: numTotal
integer :: numIdentical
integer :: numInTol
integer :: rank
end type check_t
public kgen_dp, check_t, kgen_init_verify, kgen_init_check, kgen_tolerance
public kgen_minvalue, kgen_verboselevel, kgen_print_check, kgen_perturb_real
public CHECK_NOT_CHECKED, CHECK_IDENTICAL, CHECK_IN_TOL, CHECK_OUT_TOL
public kgen_get_newunit, kgen_error_stop
"""
kgen_utils_array_sumcheck = \
"""
subroutine kgen_array_sumcheck(varname, sum1, sum2, finish)
character(*), intent(in) :: varname
real(kind=8), intent(in) :: sum1, sum2
real(kind=8), parameter :: max_rel_diff = 1.E-10
real(kind=8) :: diff, rel_diff
logical, intent(in), optional :: finish
logical checkresult
if ( sum1 == sum2 ) then
checkresult = .TRUE.
else
checkresult = .FALSE.
diff = ABS(sum2 - sum1)
if ( .NOT. (sum1 == 0._8) ) then
rel_diff = ABS(diff / sum1)
if ( rel_diff > max_rel_diff ) then
print *, ''
print *, 'SUM of array, "', varname, '", is different.'
print *, 'From file : ', sum1
print *, 'From array: ', sum2
print *, 'Difference: ', diff
print *, 'Normalized difference: ', rel_diff
if ( present(finish) .AND. finish ) then
stop
end if
end if
else
print *, ''
print *, 'SUM of array, "', varname, '", is different.'
print *, 'From file : ', sum1
print *, 'From array: ', sum2
print *, 'Difference: ', diff
if ( present(finish) .AND. finish ) then
stop
end if
end if
end if
end subroutine
"""
kgen_utils_file_tostr = \
"""
function kgen_tostr_args1(idx1) result(tostr)
integer, intent(in) :: idx1
character(len=64) :: str_idx1
character(len=64) :: tostr
write(str_idx1, *) idx1
tostr = trim(adjustl(str_idx1))
end function
function kgen_tostr_args2(idx1, idx2) result(tostr)
integer, intent(in) :: idx1, idx2
character(len=64) :: str_idx1, str_idx2
character(len=128) :: tostr
write(str_idx1, *) idx1
write(str_idx2, *) idx2
tostr = trim(adjustl(str_idx1)) // ", " // trim(adjustl(str_idx2))
end function
function kgen_tostr_args3(idx1, idx2, idx3) result(tostr)
integer, intent(in) :: idx1, idx2, idx3
character(len=64) :: str_idx1, str_idx2, str_idx3
character(len=192) :: tostr
write(str_idx1, *) idx1
write(str_idx2, *) idx2
write(str_idx3, *) idx3
tostr = trim(adjustl(str_idx1)) // ", " // trim(adjustl(str_idx2)) &
// ", " // trim(adjustl(str_idx3))
end function
function kgen_tostr_args4(idx1, idx2, idx3, idx4) result(tostr)
integer, intent(in) :: idx1, idx2, idx3, idx4
character(len=64) :: str_idx1, str_idx2, str_idx3, str_idx4
character(len=256) :: tostr
write(str_idx1, *) idx1
write(str_idx2, *) idx2
write(str_idx3, *) idx3
write(str_idx4, *) idx4
tostr = trim(adjustl(str_idx1)) // ", " // trim(adjustl(str_idx2)) &
// ", " // trim(adjustl(str_idx3)) // ", " // trim(adjustl(str_idx4))
end function
function kgen_tostr_args5(idx1, idx2, idx3, idx4, idx5) result(tostr)
integer, intent(in) :: idx1, idx2, idx3, idx4, idx5
character(len=64) :: str_idx1, str_idx2, str_idx3, str_idx4, str_idx5
character(len=320) :: tostr
write(str_idx1, *) idx1
write(str_idx2, *) idx2
write(str_idx3, *) idx3
write(str_idx4, *) idx4
write(str_idx5, *) idx5
tostr = trim(adjustl(str_idx1)) // ", " // trim(adjustl(str_idx2)) &
// ", " // trim(adjustl(str_idx3)) // ", " // trim(adjustl(str_idx4)) &
// ", " // trim(adjustl(str_idx5))
end function
function kgen_tostr_args6(idx1, idx2, idx3, idx4, idx5, idx6) result(tostr)
integer, intent(in) :: idx1, idx2, idx3, idx4, idx5, idx6
character(len=64) :: str_idx1, str_idx2, str_idx3, str_idx4, str_idx5, str_idx6
character(len=384) :: tostr
write(str_idx1, *) idx1
write(str_idx2, *) idx2
write(str_idx3, *) idx3
write(str_idx4, *) idx4
write(str_idx5, *) idx5
write(str_idx6, *) idx6
tostr = trim(adjustl(str_idx1)) // ", " // trim(adjustl(str_idx2)) &
// ", " // trim(adjustl(str_idx3)) // ", " // trim(adjustl(str_idx4)) &
// ", " // trim(adjustl(str_idx5)) // ", " // trim(adjustl(str_idx6))
end function
"""
kgen_utils_file_checksubr = \
"""
subroutine kgen_perturb_real4_dim1(var, pertlim)
real*4, intent(inout), dimension(:) :: var
real*4, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*4 :: pertval
integer :: idx1
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
call random_number(pertval)
pertval = 2.0_4*pertlim*(0.5_4 - pertval)
var(idx1) = var(idx1)*(1.0_4 + pertval)
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_perturb_real4_dim2(var, pertlim)
real*4, intent(inout), dimension(:,:) :: var
real*4, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*4 :: pertval
integer :: idx1,idx2
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
do idx2=1,size(var, dim=2)
call random_number(pertval)
pertval = 2.0_4*pertlim*(0.5_4 - pertval)
var(idx1,idx2) = var(idx1,idx2)*(1.0_4 + pertval)
end do
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_perturb_real4_dim3(var, pertlim)
real*4, intent(inout), dimension(:,:,:) :: var
real*4, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*4 :: pertval
integer :: idx1,idx2,idx3
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
do idx2=1,size(var, dim=2)
do idx3=1,size(var, dim=3)
call random_number(pertval)
pertval = 2.0_4*pertlim*(0.5_4 - pertval)
var(idx1,idx2,idx3) = var(idx1,idx2,idx3)*(1.0_4 + pertval)
end do
end do
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_perturb_real8_dim1(var, pertlim)
real*8, intent(inout), dimension(:) :: var
real*8, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*8 :: pertval
integer :: idx1
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
call random_number(pertval)
pertval = 2.0_8*pertlim*(0.5_8 - pertval)
var(idx1) = var(idx1)*(1.0_8 + pertval)
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_perturb_real8_dim2(var, pertlim)
real*8, intent(inout), dimension(:,:) :: var
real*8, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*8 :: pertval
integer :: idx1,idx2
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
do idx2=1,size(var, dim=2)
call random_number(pertval)
pertval = 2.0_8*pertlim*(0.5_8 - pertval)
var(idx1,idx2) = var(idx1,idx2)*(1.0_8 + pertval)
end do
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_perturb_real8_dim3(var, pertlim)
real*8, intent(inout), dimension(:,:,:) :: var
real*8, intent(in) :: pertlim
integer, allocatable :: rndm_seed(:)
integer :: rndm_seed_sz
real*8 :: pertval
integer :: idx1,idx2,idx3
call random_seed(size=rndm_seed_sz)
allocate(rndm_seed(rndm_seed_sz))
rndm_seed = 121869
call random_seed(put=rndm_seed)
do idx1=1,size(var, dim=1)
do idx2=1,size(var, dim=2)
do idx3=1,size(var, dim=3)
call random_number(pertval)
pertval = 2.0_8*pertlim*(0.5_8 - pertval)
var(idx1,idx2,idx3) = var(idx1,idx2,idx3)*(1.0_8 + pertval)
end do
end do
end do
deallocate(rndm_seed)
end subroutine
subroutine kgen_init_verify(verboseLevel, tolerance, minValue)
integer, intent(in), optional :: verboseLevel
real(kind=kgen_dp), intent(in), optional :: tolerance
real(kind=kgen_dp), intent(in), optional :: minValue
if(present(verboseLevel)) then
kgen_verboseLevel = verboseLevel
end if
if(present(tolerance)) then
kgen_tolerance = tolerance
end if
if(present(minvalue)) then
kgen_minvalue = minvalue
end if
end subroutine kgen_init_verify
subroutine kgen_init_check(check, rank)
type(check_t), intent(inout) :: check
integer, intent(in), optional :: rank
check%Passed = .TRUE.
check%numOutTol = 0
check%numInTol = 0
check%numTotal = 0
check%numIdentical = 0
if(present(rank)) then
check%rank = rank
else
check%rank = 0
endif
end subroutine kgen_init_check
subroutine kgen_print_check(kname, check)
character(len=*) :: kname
type(check_t), intent(in) :: check
write (*,*) TRIM(kname),': Tolerance for normalized RMS: ',kgen_tolerance
!write (*,*) TRIM(kname),':',check%numFatal,'fatal errors,',check%numWarning,'warnings detected, and',check%numIdentical,'identical out of',check%numTotal,'variables checked'
write (*,*) TRIM(kname),': Number of variables checked: ',check%numTotal
write (*,*) TRIM(kname),': Number of Identical results: ',check%numIdentical
write (*,*) TRIM(kname),': Number of variables within tolerance(not identical): ',check%numInTol
write (*,*) TRIM(kname),': Number of variables out of tolerance: ', check%numOutTol
if (check%numOutTol> 0) then
write(*,*) TRIM(kname),': Verification FAILED'
else
write(*,*) TRIM(kname),': Verification PASSED'
endif
end subroutine kgen_print_check
"""
kgen_get_newunit = \
"""
FUNCTION kgen_get_newunit() RESULT ( new_unit )
INTEGER, PARAMETER :: UNIT_MIN=100, UNIT_MAX=1000000
LOGICAL :: is_opened
INTEGER :: nunit, new_unit, counter
REAL :: r
CALL RANDOM_SEED
new_unit = -1
DO counter=1, UNIT_MAX
CALL RANDOM_NUMBER(r)
nunit = INT(r*UNIT_MAX+UNIT_MIN)
INQUIRE (UNIT=nunit, OPENED=is_opened)
IF (.NOT. is_opened) THEN
new_unit = nunit
EXIT
END IF
END DO
END FUNCTION kgen_get_newunit
"""
kgen_error_stop = \
"""
SUBROUTINE kgen_error_stop( msg )
IMPLICIT NONE
CHARACTER(LEN=*), INTENT(IN) :: msg
WRITE (*,*) msg
STOP 1
END SUBROUTINE
"""
kgen_rankthread = \
"""
SUBROUTINE kgen_rankthreadinvoke( str, rank, thread, invoke )
CHARACTER(*), INTENT(IN) :: str
INTEGER, INTENT(OUT) :: rank, thread, invoke
INTEGER :: pos1, pos2, i, e
pos1 = 1
rank = -1
thread = -1
invoke = -1
DO
pos2 = INDEX(str(pos1:), ".")
IF (pos2 == 0) THEN
READ(str(pos1:),*,IOSTAT=e) i
IF ( e == 0 ) THEN
rank = thread
thread = invoke
READ(str(pos1:), *) invoke
END IF
EXIT
END IF
READ(str(pos1:pos1+pos2-2),*,IOSTAT=e) i
IF ( e == 0 ) THEN
rank = thread
thread = invoke
READ(str(pos1:pos1+pos2-2), *) invoke
END IF
pos1 = pos2+pos1
END DO
END SUBROUTINE
"""
rdtsc = \
""" .file "rdtsc.s"
.text
.globl rdtsc_
.type rdtsc_, @function
rdtsc_:
rdtsc
movl %eax,%ecx
movl %edx,%eax
shlq $32,%rax
addq %rcx,%rax
ret
.size rdtsc_, .-rdtsc_"""
| 2.15625 | 2 |
pool_automation/roles/aws_manage/library/test_stateful_set.py | Rob-S/indy-node | 627 | 12758527 | <reponame>Rob-S/indy-node<gh_stars>100-1000
import random
import string
import json
import boto3
import pytest
from stateful_set import (
AWS_REGIONS, InstanceParams, find_ubuntu_ami,
AwsEC2Launcher, AwsEC2Terminator, find_instances,
valid_instances, get_tag, manage_instances
)
class EC2TestCtx(object):
    """Per-region bundle of AWS handles used by the tests: the region
    code, the boto3 EC2 resource, the EC2 client and an optional price
    lookup table."""

    def __init__(self, region, resource, client, prices=None):
        for attr, value in (('region', region),
                            ('resource', resource),
                            ('client', client),
                            ('prices', prices)):
            setattr(self, attr, value)
############
# FIXTURES #
############
@pytest.fixture
def ec2(regions, ec2_all):
    """Boto3 EC2 *resource* handles, one per region, in ``regions`` order."""
    return [ec2_all[region]['rc'] for region in regions]
@pytest.fixture
def ec2cl(regions, ec2_all):
    """Boto3 EC2 *client* handles, one per region, in ``regions`` order."""
    return [ec2_all[region]['cl'] for region in regions]
@pytest.fixture
def ec2_resources(request, regions, ec2):
    """Provision per-test AWS prerequisites and clean them up afterwards.

    Yields an ``InstanceParams`` whose group / key-pair / security-group
    names embed the current test's node name.  Before yielding, the key
    pair and security group are created in every region; on teardown any
    instances matching the params are terminated, then the key pair and
    security group are removed again.
    """
    def gen_params(group_suffix=None, key_name_suffix=None,
                   security_group_suffix=None):
        # Build an InstanceParams; any suffix left as None is replaced by
        # a random 7-character string to avoid name collisions.
        def _random(N=7):
            # Random string of uppercase letters and digits, length N.
            return ''.join(random.choice(string.ascii_uppercase + string.digits)
                           for _ in range(N))

        return InstanceParams(
            project='Indy-PA-dev',
            add_tags={'Purpose': 'Test Pool Automation'},
            namespace='test_stateful_set',
            group="group_{}"
                  .format(group_suffix if group_suffix
                          else _random()),
            key_name="test_stateful_set_key_{}"
                     .format(key_name_suffix if key_name_suffix
                             else _random()),
            security_group="test_stateful_set_security_group_{}"
                           .format(security_group_suffix
                                   if security_group_suffix
                                   else _random()),
            type_name='t2.micro',
            # TODO docs
            market_spot=(request.config.getoption("--market-type") == 'spot'),
            spot_max_price=None,
            # TODO docs
            ebs_volume_size=9,
            ebs_volume_type='gp2',
        )
    def manage_key_pair(ec2, present, params):
        # Ensure exactly one key pair named params.key_name exists when
        # `present` is True (extras are deleted), and none when False.
        count = 0
        for key in ec2.key_pairs.all():
            if key.key_name != params.key_name:
                continue
            if present and count == 0:
                count = 1
            else:
                key.delete()
        if present and count == 0:
            ec2.create_key_pair(KeyName=params.key_name)
    def manage_security_group(ec2, present, params):
        # Same idea as manage_key_pair, but for the security group; a
        # newly created group is tagged with project/namespace/group.
        count = 0
        for sgroup in ec2.security_groups.all():
            if sgroup.group_name != params.security_group:
                continue
            if present and count == 0:
                count = 1
            else:
                sgroup.delete()
        if present and count == 0:
            sg = ec2.create_security_group(
                GroupName=params.security_group,
                Description='Test security group')
            sg.create_tags(Tags=[
                {'Key': 'Name', 'Value': "{}-{}-{}"
                    .format(params.project,
                            params.namespace,
                            params.group)},
                {'Key': 'Project', 'Value': params.project},
                {'Key': 'Namespace', 'Value': params.namespace},
                {'Key': 'Group', 'Value': params.group}])
    params = gen_params(
        group_suffix=request.node.name,
        key_name_suffix=request.node.name,
        security_group_suffix=request.node.name
    )
    # Setup: provision the key pair and security group in every region.
    for rc in ec2:
        manage_key_pair(rc, True, params)
        manage_security_group(rc, True, params)
    yield params
    # Teardown: terminate any instances the test launched...
    terminator = AwsEC2Terminator()
    for region, rc in zip(regions, ec2):
        for inst in find_instances(
                rc, params.project, params.namespace, params.group):
            terminator.terminate(inst, region)
    terminator.wait(False)
    # ...then drop the key pair and security group again.
    for rc in ec2:
        manage_key_pair(rc, False, params)
        manage_security_group(rc, False, params)
@pytest.fixture(scope="session")
def pricing_client():
    """Session-wide boto3 Pricing client.

    The AWS Pricing API endpoint is exposed only in us-east-1 and
    ap-south-1, hence the hard-coded region:
    https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-pelong.html
    """
    client = boto3.client('pricing', region_name='us-east-1')
    return client
@pytest.fixture
def on_demand_prices(request, pricing_client, ec2_prices,
                     regions, ec2_resources):
    """Populate ``ec2_prices[region]['on-demand'][type]`` for marked tests.

    Runs only when the test carries ``@pytest.mark.prices(term=...)`` with
    'on-demand' in its term list. Prices already cached in ``ec2_prices``
    are reused; missing ones are fetched from the AWS Pricing API.
    """
    marker = request.node.get_closest_marker('prices')
    if not (marker and ('on-demand' in marker.kwargs.get('term', []))):
        return
    for region_code in regions:
        res = ec2_prices[region_code]['on-demand'].get(ec2_resources.type_name)
        if res is None:
            # Search product filters
            # https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_pricing_Filter.html
            # https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-ppslong.html
            filters = [
                {'Field': k, 'Type': 'TERM_MATCH', 'Value': v} for k, v in
                (('tenancy', 'shared'),
                 ('capacitystatus', 'UnusedCapacityReservation'),
                 ('location', AWS_REGIONS[region_code].location),
                 ('operatingSystem', 'Linux'),  # TODO might be parametrized
                 ('instanceType', ec2_resources.type_name),
                 ('preInstalledSw', 'NA'))
            ]
            products = pricing_client.get_products(
                ServiceCode='AmazonEC2', Filters=filters)
            price_info = json.loads(products['PriceList'][0])
            # https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/reading-an-offer.html
            #
            # "terms": {
            #    "OnDemand": {
            #        "<sku.offerTermCode>": {
            #            "offerTermCode":"The term code of the product",
            #            "sku":"The SKU of the product",
            #            ...
            #            "priceDimensions": {
            #                "<sku.offerTermCode.rateCode>": {
            #                    "rateCode":"The rate code of the price",
            #                    ...
            #                    "pricePerUnit": {
            #                        "currencyCode":"currencyRate",
            #                    }
            #                }
            #            }
            #        }
            #    }
            # }
            # The filters pin a single offer/tier, so popitem() takes the
            # only entry of each nested dict.
            offer = price_info['terms']['OnDemand'].popitem()[1]
            price_tier = offer['priceDimensions'].popitem()[1]
            res = float(price_tier['pricePerUnit']['USD'])
            ec2_prices[region_code]['on-demand'][ec2_resources.type_name] = res
@pytest.fixture
def ec2ctxs(regions, ec2, ec2cl, on_demand_prices, ec2_prices):
    """Build one EC2TestCtx (region, resource, client, prices) per region."""
    # The three parallel lists must have the same length to be zipped safely.
    assert len(set([len(l) for l in (regions, ec2, ec2cl)])) == 1
    return [EC2TestCtx(r, rc, cl, ec2_prices[r]) for r, rc, cl
            in zip(regions, ec2, ec2cl)]
@pytest.fixture
def ec2ctx(ec2ctxs):
    """Single-region convenience fixture; valid only for one-region tests."""
    assert len(ec2ctxs) == 1
    return ec2ctxs[0]
#########
# TESTS #
#########
def test_find_ubuntu_image(ec2ctx):
    """find_ubuntu_ami must return an available Canonical Ubuntu 16.04 AMI."""
    image_id = find_ubuntu_ami(ec2ctx.resource)
    assert image_id is not None
    image = ec2ctx.resource.Image(image_id)
    assert image.owner_id == '099720109477'  # Canonical
    assert image.state == 'available'
    assert image.architecture == 'x86_64'
    assert 'Canonical' in image.description
    assert 'Ubuntu' in image.description
    assert '16.04' in image.description
    assert 'UNSUPPORTED' not in image.description
# TODO split test_AwsEC2Launcher tests into multiple more focused ones
def check_instance_params(inst, params, ec2cl=None, price=None):
    """Assert that a launched EC2 instance matches the requested parameters.

    :param inst: boto3 EC2 Instance resource to verify.
    :param params: expected launch parameters (record with ``type_name``,
        ``key_name``, ``security_group``, ``ebs_volume_size``,
        ``ebs_volume_type``, ``market_spot``, ``add_tags``, ...).
    :param ec2cl: boto3 EC2 client; required only when ``params.market_spot``.
    :param price: expected spot price; required only when ``params.market_spot``.
    """
    # https://stackoverflow.com/questions/5595425/what-is-the-best-way-to-compare-floats-for-almost-equality-in-python
    # https://www.python.org/dev/peps/pep-0485/#proposed-implementation
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        # Float equality with relative/absolute tolerance (PEP 485).
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
    def check_tags(obj):
        # Mandatory identification tags plus any user-supplied extra tags.
        assert {'Key': 'Project', 'Value': params.project} in obj.tags
        assert {'Key': 'Namespace', 'Value': params.namespace} in obj.tags
        assert {'Key': 'Group', 'Value': params.group} in obj.tags
        # BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; items() behaves correctly on both.
        for tag_key, tag_value in params.add_tags.items():
            assert tag_value == get_tag(obj, tag_key)
    # general
    assert inst.instance_type == params.type_name
    assert inst.state['Name'] == 'running'
    # tags
    check_tags(inst)
    # linked resources
    assert inst.key_name == params.key_name
    assert len(inst.security_groups) == 1
    assert inst.security_groups[0]['GroupName'] == params.security_group
    # ebs options
    volumes = list(inst.volumes.all())
    assert len(volumes) == 1
    assert volumes[0].size == params.ebs_volume_size
    assert volumes[0].volume_type == params.ebs_volume_type
    check_tags(volumes[0])
    # market options: spot instances must carry a request priced as asked
    if params.market_spot:
        assert inst.instance_lifecycle == 'spot'
        assert inst.spot_instance_request_id is not None
        spot_params = ec2cl.describe_spot_instance_requests(
            SpotInstanceRequestIds=[inst.spot_instance_request_id])
        assert isclose(
            float(spot_params['SpotInstanceRequests'][0]['SpotPrice']),
            price
        )
@pytest.mark.regions([['us-east-2', 'eu-west-1']])
def test_AwsEC2Launcher_wait(ec2ctxs, ec2_resources):
    """Launch one on-demand instance per region and verify wait() drains
    the launcher's pending ('awaited') set before checking parameters."""
    launcher = AwsEC2Launcher()
    instances = []
    params = ec2_resources._replace(market_spot=False)
    for ctx in ec2ctxs:
        _instances = launcher.launch(
            params, 1, region=ctx.region, ec2=ctx.resource)
        assert len(_instances) == 1
        instances += _instances
    # Launches are asynchronous: wait() must clear the awaited list.
    assert len(launcher.awaited) > 0
    launcher.wait()
    assert len(launcher.awaited) == 0
    for inst in instances:
        check_instance_params(inst, params)
def idfn_test_AwsEC2Launcher(max_price):
    """Readable pytest parameter id for a spot max-price factor."""
    # None means "use the default price" in the parametrization.
    return ('max_price_default' if max_price is None
            else "max_price_{}".format(max_price))
@pytest.mark.prices(term="on-demand")
@pytest.mark.regions([['us-east-2'], ['eu-west-1']])
@pytest.mark.parametrize(
    'max_price_factor', [None, 0.7], ids=idfn_test_AwsEC2Launcher)
def test_AwsEC2Launcher_spot(ec2ctx, ec2_resources, max_price_factor):
    """Launch a spot instance at (a fraction of) the on-demand price and
    verify the resulting spot request carries the requested max price."""
    launcher = AwsEC2Launcher()
    default_price = ec2ctx.prices['on-demand'][ec2_resources.type_name]
    # None -> AWS default cap; otherwise bid a fraction of on-demand price.
    price = default_price * (1 if max_price_factor is None else
                             max_price_factor)
    params = ec2_resources._replace(
        market_spot=True,
        spot_max_price=(None if max_price_factor is None else
                        "{}".format(price))
    )
    instances = launcher.launch(
        params, 1, region=ec2ctx.region, ec2=ec2ctx.resource)
    launcher.wait()
    for inst in instances:
        check_instance_params(inst, params, ec2ctx.client, price)
@pytest.mark.regions([['us-east-2', 'eu-west-1']])
def test_AwsEC2Terminator_wait(ec2ctxs, ec2_resources):
    """Terminate launched instances and verify wait() drains the
    terminator's pending set and instances reach 'terminated' state."""
    launcher = AwsEC2Launcher()
    terminator = AwsEC2Terminator()
    instances = []
    params = ec2_resources._replace(market_spot=False)
    for ctx in ec2ctxs:
        _instances = launcher.launch(
            params, 1, region=ctx.region, ec2=ctx.resource)
        assert len(_instances) == 1
        instances += _instances
    launcher.wait()
    for instance in instances:
        terminator.terminate(instance)
    # Termination is asynchronous: wait() must clear the awaited list.
    assert len(terminator.awaited) > 0
    terminator.wait()
    assert len(terminator.awaited) == 0
    for instance in instances:
        assert instance.state['Name'] == 'terminated'
@pytest.mark.regions([['us-east-2'], ['eu-west-1']])
def test_AwsEC2Terminator_spot(ec2ctx, ec2_resources):
    """Terminating a spot instance must also close/cancel its spot request."""
    launcher = AwsEC2Launcher()
    terminator = AwsEC2Terminator()
    params = ec2_resources._replace(market_spot=True, spot_max_price=None)
    instances = launcher.launch(
        params, 1, region=ec2ctx.region, ec2=ec2ctx.resource)
    launcher.wait()
    for instance in instances:
        terminator.terminate(instance)
    for instance in instances:
        assert instance.spot_instance_request_id is not None
        spot_params = ec2ctx.client.describe_spot_instance_requests(
            SpotInstanceRequestIds=[instance.spot_instance_request_id])
        # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#get-spot-instance-bid-status
        assert (spot_params['SpotInstanceRequests'][0]['State'] in
                ('closed', 'cancelled'))
        assert (spot_params['SpotInstanceRequests'][0]['Status']['Code'] in (
            'instance-terminated-by-user',
            'request-canceled-and-instance-running'
        ))
    terminator.wait()
@pytest.mark.regions([['us-east-1']])
def test_find_instances(ec2ctx, ec2_resources):
    """find_instances must filter by project/namespace and, optionally, group.

    Launches 2 instances in group 'aaa' and 3 in group 'bbb', then checks
    that group-scoped and namespace-wide lookups agree.
    """
    launcher = AwsEC2Launcher()
    terminator = AwsEC2Terminator()
    params1 = ec2_resources._replace(
        group="{}_{}".format(ec2_resources.group, 'aaa'))
    params2 = ec2_resources._replace(
        group="{}_{}".format(ec2_resources.group, 'bbb'))
    # Pre-clean any leftovers from earlier runs so counts are deterministic.
    for group in (params1.group, params2.group):
        for inst in find_instances(
                ec2ctx.resource, ec2_resources.project,
                ec2_resources.namespace, group):
            terminator.terminate(inst, ec2ctx.region)
    terminator.wait(False)
    launcher.launch(params1, 2, ec2=ec2ctx.resource)
    launcher.launch(params2, 3, ec2=ec2ctx.resource)
    aaa = find_instances(
        ec2ctx.resource, params1.project, params1.namespace, params1.group)
    bbb = find_instances(
        ec2ctx.resource, params2.project, params2.namespace, params2.group)
    # Lookup without a group returns the union; re-filter by tag to compare.
    aaa_and_bbb = [i for i in find_instances(
        ec2ctx.resource, ec2_resources.project, ec2_resources.namespace)
        if get_tag(i, 'Group') in (params1.group, params2.group)]
    assert len(aaa) == 2
    assert len(bbb) == 3
    assert len(aaa_and_bbb) == 5
    assert set(aaa).union(bbb) == set(aaa_and_bbb)
    for inst in aaa_and_bbb:
        terminator.terminate(inst, ec2ctx.region)
    terminator.wait(False)
def test_valid_instances():
    """valid_instances must assign sequential ids round-robin over regions."""
    regions = ['us', 'eu']
    # total count -> (expected ids for 'us', expected ids for 'eu')
    expected = {
        0: ([], []),
        1: (['1'], []),
        2: (['1'], ['2']),
        3: (['1', '3'], ['2']),
        4: (['1', '3'], ['2', '4']),
    }
    for total, (us_ids, eu_ids) in expected.items():
        instances = valid_instances(regions, total)
        assert instances['us'] == us_ids
        assert instances['eu'] == eu_ids
@pytest.mark.regions(
    [['us-east-2', 'ca-central-1', 'eu-west-1']], ids=['3regions'])
def test_manage_instances(ec2ctxs, ec2_resources):
    """Scale a 3-region group 4 -> 4 -> 2 -> 0 -> 0 via manage_instances.

    Verifies round-robin id placement (ids '1'..'4' across the regions),
    idempotence (no change when the target count is already met), tag
    naming, and uniqueness of host ids/IPs among active plus terminated.
    """
    regions = [ctx.region for ctx in ec2ctxs]
    def check_hosts(hosts):
        # Each returned host must have a unique tag id and public IP.
        assert len(set(host.tag_id for host in hosts)) == len(hosts)
        assert len(set(host.public_ip for host in hosts)) == len(hosts)
    def check_tags(instances):
        # Every instance must carry an ID tag and the canonical Name tag
        # "<project>-<namespace>-<group>-<zero-padded id>" (lower-cased).
        for inst_group in instances:
            for inst in inst_group:
                inst_tag_id = get_tag(inst, 'ID')
                assert inst_tag_id is not None
                inst_tag_name = get_tag(inst, 'Name')
                assert inst_tag_name == "{}-{}-{}-{}".format(
                    ec2_resources.project,
                    ec2_resources.namespace,
                    ec2_resources.group,
                    inst_tag_id.zfill(3)).lower()
    # Scale up to 4: ids distributed round-robin -> region0 gets 1 and 4.
    res = manage_instances(regions, ec2_resources, 4)
    instances = [find_instances(ctx.resource, ec2_resources.project,
                                ec2_resources.namespace, ec2_resources.group)
                 for ctx in ec2ctxs]
    assert res.changed
    assert len(res.active) == 4
    assert len(res.terminated) == 0
    check_hosts(res.active + res.terminated)
    check_tags(instances)
    assert len(instances[0]) == 2
    assert len(instances[1]) == 1
    assert len(instances[2]) == 1
    assert set([get_tag(instances[0][0], 'ID'),
                get_tag(instances[0][1], 'ID')]) == set(['1', '4'])
    assert get_tag(instances[1][0], 'ID') == '2'
    assert get_tag(instances[2][0], 'ID') == '3'
    # Same target count again: must be a no-op (idempotence).
    res = manage_instances(regions, ec2_resources, 4)
    instances = [find_instances(ctx.resource, ec2_resources.project,
                                ec2_resources.namespace, ec2_resources.group)
                 for ctx in ec2ctxs]
    assert not res.changed
    assert len(res.active) == 4
    assert len(res.terminated) == 0
    check_hosts(res.active + res.terminated)
    check_tags(instances)
    assert len(instances[0]) == 2
    assert len(instances[1]) == 1
    assert len(instances[2]) == 1
    assert set([get_tag(instances[0][0], 'ID'),
                get_tag(instances[0][1], 'ID')]) == set(['1', '4'])
    assert get_tag(instances[1][0], 'ID') == '2'
    assert get_tag(instances[2][0], 'ID') == '3'
    # Scale down to 2: ids '1' and '2' survive, '3' and '4' are terminated.
    res = manage_instances(regions, ec2_resources, 2)
    instances = [find_instances(ctx.resource, ec2_resources.project,
                                ec2_resources.namespace, ec2_resources.group)
                 for ctx in ec2ctxs]
    assert res.changed
    assert len(res.active) == 2
    assert len(res.terminated) == 2
    check_hosts(res.active + res.terminated)
    check_tags(instances)
    assert len(instances[0]) == 1
    assert len(instances[1]) == 1
    assert len(instances[2]) == 0
    assert get_tag(instances[0][0], 'ID') == '1'
    assert get_tag(instances[1][0], 'ID') == '2'
    # Scale down to 0: everything terminated.
    res = manage_instances(regions, ec2_resources, 0)
    instances = [find_instances(ctx.resource, ec2_resources.project,
                                ec2_resources.namespace, ec2_resources.group)
                 for ctx in ec2ctxs]
    assert res.changed
    assert len(res.active) == 0
    assert len(res.terminated) == 2
    check_hosts(res.active + res.terminated)
    check_tags(instances)
    assert len(instances[0]) == 0
    assert len(instances[1]) == 0
    assert len(instances[2]) == 0
    # Target 0 again: must be a no-op.
    res = manage_instances(regions, ec2_resources, 0)
    instances = [find_instances(ctx.resource, ec2_resources.project,
                                ec2_resources.namespace, ec2_resources.group)
                 for ctx in ec2ctxs]
    assert not res.changed
    assert len(res.active) == 0
    assert len(res.terminated) == 0
    check_hosts(res.active + res.terminated)
    check_tags(instances)
    assert len(instances[0]) == 0
    assert len(instances[1]) == 0
    assert len(instances[2]) == 0
| 2.1875 | 2 |
cameratest.py | KaiJin1995/MTCNN-VGG-face | 23 | 12758528 | #coding:utf-8
import cv2
import os
import sys
# Check whether the camera can be used.
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    # No camera available: exit with a clear message instead of crashing
    # later inside cv2.imshow on a None frame.
    sys.exit("Unable to open camera 0")
try:
    while True:
        ret, frame = cap.read()
        if not ret:
            # Frame grab failed (camera unplugged / stream ended).
            break
        cv2.imshow('MyVideo', frame)
        # Original loop had no exit condition; press 'q' to quit.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
finally:
    # Release the device and window resources even on Ctrl-C.
    cap.release()
    cv2.destroyAllWindows()
| 2.828125 | 3 |
manage.py | KaitoRyouga/CTFd | 0 | 12758529 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from CTFd import create_app
from CTFd.utils import get_config as get_config_util, set_config as set_config_util
from CTFd.models import *
# Build the CTFd application and wire it to the Flask-Script CLI.
app = create_app()
manager = Manager(app)
# Expose Flask-Migrate commands as `python manage.py db <subcommand>`.
manager.add_command("db", MigrateCommand)
def jsenums():
    """Regenerate themes/core/assets/js/constants.js from CTFd's JS_ENUMS."""
    from CTFd.constants import JS_ENUMS
    import json
    import os
    target = os.path.join(app.root_path, "themes/core/assets/js/constants.js")
    # Render every enum as a frozen JS constant, then write them in one pass.
    rendered = [
        "const {} = Object.freeze({});".format(name, json.dumps(value))
        for name, value in JS_ENUMS.items()
    ]
    with open(target, "w+") as out:
        out.write("".join(rendered))
BUILD_COMMANDS = {"jsenums": jsenums}
@manager.command
def get_config(key):
    """Print the CTFd config value stored under ``key``."""
    with app.app_context():
        print(get_config_util(key))
@manager.command
def set_config(key, value):
    """Set the CTFd config ``key`` to ``value`` and print the stored value."""
    with app.app_context():
        print(set_config_util(key, value).value)
@manager.command
def build(cmd):
    """Run a named build task from BUILD_COMMANDS inside the app context.

    :param cmd: name of the build task (e.g. 'jsenums').
    :raises ValueError: if ``cmd`` is not a known build command (previously
        this failed with an opaque ``TypeError: 'NoneType' is not callable``).
    """
    with app.app_context():
        command = BUILD_COMMANDS.get(cmd)
        if command is None:
            raise ValueError(
                "Unknown build command '{}'. Available: {}".format(
                    cmd, ", ".join(sorted(BUILD_COMMANDS))
                )
            )
        command()
if __name__ == "__main__":
    # Dispatch CLI commands (get_config, set_config, build, db, ...).
    manager.run()
| 2.234375 | 2 |
problema_plano/graficacion.py | raulsaavedr/problema_plano | 0 | 12758530 | from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Rectangle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
import mpl_toolkits.mplot3d.art3d as art3d
import numpy as np
import pandas as pd
from config import conf
import eigen as eig
import region as reg
import hiperbolica as hyp
import matrices_acoplamiento as m_acop
import distorsionador as v_dist
import matriz_gauss as m_gauss
import v_transpuestos as v_trans
import potencial as pot
import flujo as flj
__doc__ = """
Este modulo esta hecho para graficar los valores con error y sin error de los
diferentes componentes del calculo del problema plano. Es equivalente a todas
las funciones de prueba.
"""
def prueba_valor_eigen(valores_eigen, vectores_valores_eigen, vectores_valores_eigen_err,\
                       n_dimension=100, error=1):
    """Plot each eigenvalue vector against its error-perturbed version.

    Equivalent to f03_Valor_Eigen_Prueba(). One PNG per eigen value is
    written to ``graficas/<env>/vectores eigen/`` (the directory must exist
    beforehand). Visually, the test passes when only one curve is visible
    (both series overlap).

    :param valores_eigen: DataFrame with the representative eigen values of
        all regions (rows indexed by eigen-value name).
    :param vectores_valores_eigen: DataFrame with the computed eigen vectors,
        e.g. Pn = [Veig[0], ..., Veig[n_dimension]].
    :param vectores_valores_eigen_err: same vectors computed with an error.
    :param n_dimension: number of terms per vector (x-axis length).
    :param error: 1 selects the "sin error" labeling, otherwise "con error".
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1)  # Only for plotting purposes (x axis)
    for chr_eigen in valores_eigen.index:
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Maximum over both series, used only to place the annotation text.
        maximo = np.max((vectores_valores_eigen.loc[chr_eigen].max(), vectores_valores_eigen_err.loc[chr_eigen].max()))
        fig.suptitle(f"Control {error_t} nr={n_dimension} del Valor Eigen {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
        ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
                                                   Error si imprime dos curvas""")
        # Plot the exact eigen vector in red
        ax.plot(N, vectores_valores_eigen.loc[chr_eigen], 'r', label='sin error')
        # Plot the error-perturbed eigen vector in blue
        ax.plot(N, vectores_valores_eigen_err.loc[chr_eigen], 'b', label='con error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/vectores eigen/control ' +\
                   error_t + " " + chr_eigen + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        # Console progress report
        print(f"* {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
def prueba_matrices_diagonal_valores_eigen(valores_eigen, vectores_valores_eigen,\
                                           vectores_valores_eigen_err_matriz, n_dimension=100, error=1):
    """Plot the diagonal eigenvalue matrices against their error versions.

    Equivalent to f04_Diag_Valor_Eigen_Prueba(). One PNG per eigen value is
    written to ``graficas/<env>/matrices diagonales de valores eigen/``
    (directory must exist). The test passes visually when both curves
    overlap.

    :param valores_eigen: DataFrame with the representative eigen values.
    :param vectores_valores_eigen: DataFrame with the computed eigen vectors.
    :param vectores_valores_eigen_err_matriz: DataFrame with the diagonal
        eigen matrices computed with a given error.
    :param n_dimension: number of terms per vector (x-axis length).
    :param error: 1 selects the "sin error" labeling, otherwise "con error".
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1)  # Only for plotting purposes (x axis)
    for chr_eigen in vectores_valores_eigen.index:
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Maximum over both series, used only to place the annotation text.
        maximo = np.max((vectores_valores_eigen.loc[chr_eigen].max(), vectores_valores_eigen_err_matriz.loc[chr_eigen].max()))
        fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Matriz diagonal del valor Eigen: {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
        ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
                                                   Error si imprime dos curvas""")
        ax.plot(N, vectores_valores_eigen.loc[chr_eigen], 'r', label='sin error')
        ax.plot(N, vectores_valores_eigen_err_matriz.loc[chr_eigen], 'b', label='con error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/matrices diagonales de valores eigen/' +\
                   'control ' + error_t + " " + chr_eigen + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        print(f"* Matriz diagonal {chr_eigen+'='+valores_eigen.loc[chr_eigen]['calcular_str']}")
def prueba_matrices_diagonal_funciones_hiperbolicas(funciones_hiperbolicas, vectores_funciones_hiperbolicas,\
                                                    vectores_funciones_hiperbolicas_err, n_dimension=100, error=1):
    """Plot diagonal hyperbolic-function matrices against error versions.

    Equivalent to f05_Diag_Func_Hiper_Prueba(). One PNG per diagonal is
    written to ``graficas/<env>/matrices diagonales de funciones
    hiperbolicas/`` (directory must exist). The test passes visually when
    both curves overlap.

    :param funciones_hiperbolicas: DataFrame with everything needed to
        compute the hyperbolic-function vectors.
    :param vectores_funciones_hiperbolicas: computed hyperbolic vectors.
    :param vectores_funciones_hiperbolicas_err: same vectors with an error.
    :param n_dimension: number of terms per vector (x-axis length).
    :param error: 1 selects the "sin error" labeling, otherwise "con error".
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1)  # Only for plotting purposes (x axis)
    # Iterate over the diagonal-matrix identifiers.
    for nro_diagonal in funciones_hiperbolicas.index:
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Extremes over both series, used to position the annotation text.
        maximo = np.max((vectores_funciones_hiperbolicas.loc[nro_diagonal].max(), vectores_funciones_hiperbolicas_err.loc[nro_diagonal].max()))
        minimo = np.min((vectores_funciones_hiperbolicas.loc[nro_diagonal].min(), vectores_funciones_hiperbolicas_err.loc[nro_diagonal].min()))
        fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Control de la matriz diagonal: {nro_diagonal+'='+funciones_hiperbolicas.loc[nro_diagonal]['calcular_str']}")
        ax.text( 0.1 * n_dimension, minimo + ((maximo - minimo) / 2), """Prueba correcta si se imprime una sola curva.
                                                                         Error si imprime dos curvas""")
        # plt.axvline(0.1 * n_dimension, color='k', linestyle='solid')
        # plt.axhline(.00005 * maximo, color='k', linestyle='solid')
        ax.plot(N, vectores_funciones_hiperbolicas.loc[nro_diagonal], 'r', label='sin error')
        ax.plot(N, vectores_funciones_hiperbolicas_err.loc[nro_diagonal], 'b', label='con error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/matrices diagonales de funciones hiperbolicas/' +\
                   'control ' + error_t + " " + nro_diagonal + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        print(f"* Matriz diagonal {nro_diagonal+'='+funciones_hiperbolicas.loc[nro_diagonal]['calcular_str']}")
def prueba_matrices_cuadradas_acoplamiento(integrandos_matrices_acoplamiento, matrices_acoplamiento_int,\
                                           matrices_acoplamiento_sol, n_dimension=100, error=1):
    """Plot coupling matrices: analytic solution vs. scipy quad integration.

    One PNG per coupling matrix is written to
    ``graficas/<env>/matrices cuadradas de acoplamiento/`` (directory must
    exist). Each n_dimension x n_dimension matrix is flattened (stacked)
    into a vector so both solutions can be compared as curves.

    :param integrandos_matrices_acoplamiento: DataFrame describing each
        coupling-matrix integrand (provides the index and 'calcular_str').
    :param matrices_acoplamiento_int: matrices from numerical integration.
    :param matrices_acoplamiento_sol: matrices from the analytic solution.
    :param n_dimension: matrix dimension compared/plotted.
    :param error: multiplicative factor applied to the analytic solution.
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, (n_dimension * n_dimension) + 1)  # Only for plotting purposes
    # Apply the error factor to the analytic solution before comparing.
    matrices_acoplamiento_sol = error * matrices_acoplamiento_sol
    for M in integrandos_matrices_acoplamiento.index:
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # NOTE: stack() flattens each matrix into one vector of all entries.
        maximo = np.max((matrices_acoplamiento_int.loc[M].stack().loc[:n_dimension,:n_dimension].max(), matrices_acoplamiento_sol.loc[M].stack().loc[:n_dimension,:n_dimension].max()))
        fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - {M + '=' + integrandos_matrices_acoplamiento.loc[M, 'calcular_str']}")
        # ax.text( 0.5 * (n_dimension ** 2), maximo, """Prueba correcta si se imprime una sola grafica.
        #                                               Error si imprime dos graficas""")
        ax.plot(N, matrices_acoplamiento_int.loc[M].stack().loc[:n_dimension,:n_dimension], 'r', label='sol. integrate.quad')
        ax.plot(N, matrices_acoplamiento_sol.loc[M].stack().loc[:n_dimension,:n_dimension], 'b', label='sol. analitica ' + error_t)
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/matrices cuadradas de acoplamiento/' +\
                   'control ' + error_t + " " + M + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        print(f"* Matriz acompladora {M + '=' + integrandos_matrices_acoplamiento.loc[M, 'calcular_str']}")
def prueba_vectores_distorsionadores(integrandos_vectores_distorsionadores, vectores_distorsionadores_int,\
                                     vectores_distorsionadores_sol, n_dimension=100, error=1):
    """Plot distorting vectors: analytic solution vs. scipy quad integration.

    One PNG per vector is written to
    ``graficas/<env>/vectores distorsionadores/`` (directory must exist).

    :param integrandos_vectores_distorsionadores: DataFrame describing each
        distorting-vector integrand (provides the index and 'calcular_str').
    :param vectores_distorsionadores_int: vectors from numerical integration.
    :param vectores_distorsionadores_sol: vectors from the analytic solution.
    :param n_dimension: number of terms compared/plotted.
    :param error: multiplicative factor applied to the analytic solution.
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, n_dimension + 1)  # Only for plotting purposes (x axis)
    # Apply the error factor to the analytic solution before comparing.
    vectores_distorsionadores_sol = error * vectores_distorsionadores_sol
    for Sm in integrandos_vectores_distorsionadores.index:
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Maximum over both series (kept for annotation positioning).
        maximo = np.max((vectores_distorsionadores_int.loc[Sm][:n_dimension].max(), vectores_distorsionadores_sol.loc[Sm][:n_dimension].max()))
        plt.xticks(N)
        fig.suptitle(f"{error_t.capitalize()} - nr={n_dimension} - Vector Dist.: {Sm + '=' + integrandos_vectores_distorsionadores.loc[Sm, 'calcular_str']}")
        # ax.text( 0.5 * (n_dimension ** 2), maximo, """Prueba correcta si se imprime una sola grafica.
        #                                               Error si imprime dos graficas""")
        ax.plot(N, vectores_distorsionadores_int.loc[Sm][:n_dimension], 'r', label='sol. integrate.quad')
        ax.plot(N, vectores_distorsionadores_sol.loc[Sm][:n_dimension], 'b', label='sol. analitica '+error_t)
        ax.legend(loc='upper right')
        filename = 'graficas/' + conf.data['env']['path'] + '/vectores distorsionadores/' +\
                   'control ' + error_t + " " + Sm + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        print(f"* Vector distorsionador {Sm + '=' + integrandos_vectores_distorsionadores.loc[Sm, 'calcular_str']}")
def prueba_potencial(regiones, recursos_potencial, potenciales, potenciales_err, dimension_mesh,\
                     n_dimension=100, error =1):
    """Plot each region's potential against its error-perturbed version.

    Equivalent to f12_V_dV_Prueba(). One PNG per potential is written to
    ``graficas/<env>/potenciales/`` (directory must exist). The test passes
    visually when both curves overlap.

    :param regiones: DataFrame with region metadata (epsilon per region).
    :param recursos_potencial: DataFrame with the formula strings
        ('calcular_str') used for console reporting.
    :param potenciales: DataFrame of computed potentials (rows 'V1', 'V2'...).
    :param potenciales_err: same potentials computed with an error.
    :param dimension_mesh: number of mesh points (x-axis length).
    :param n_dimension: series truncation order, shown in the title.
    :param error: 1 selects the "sin error" labeling, otherwise "con error".
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, dimension_mesh + 1)  # Only for plotting purposes (x axis)
    for n_potencial in potenciales.index:
        # Map 'V<k>' to its region label 'Reg.<k>'.
        index_reg_actual = "Reg." + n_potencial.split('V')[1]
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Maximum over both series, used to place the annotation text.
        maximo = np.max((potenciales.loc[n_potencial].max(), potenciales_err.loc[n_potencial].max()))
        # NOTE(review): 'minimo' uses np.max over the mins and is never read
        # afterwards — looks like leftover code; confirm before relying on it.
        minimo = np.max((potenciales.loc[n_potencial].min(), potenciales_err.loc[n_potencial].min()))
        fig.suptitle(f"Con nr={n_dimension}- Prueba del potencial {error_t} de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
                                                   Error si imprime dos curvas""")
        # Plot the error-perturbed potential in red
        ax.plot(N, potenciales_err.loc[n_potencial], 'r', label='con error')
        # Plot the exact potential in black
        ax.plot(N, potenciales.loc[n_potencial], 'k', label='sin error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/potenciales/' +\
                   'control ' + error_t + " " + n_potencial + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        # Console progress report
        print(f"* {n_potencial}={recursos_potencial.loc[n_potencial,'calcular_str']}")
def prueba_flujo(regiones, recursos_flujo, flujos, flujos_err, dimension_mesh,\
                 n_dimension=100, error=1):
    """Plot each region's flux against its error-perturbed version.

    Equivalent to f12_V_dV_Prueba(). One PNG per flux is written to
    ``graficas/<env>/flujos/`` (directory must exist). The test passes
    visually when both curves overlap.

    :param regiones: DataFrame with region metadata (epsilon per region).
    :param recursos_flujo: DataFrame with the formula strings
        ('calcular_str') used for console reporting.
    :param flujos: DataFrame of computed fluxes.
    :param flujos_err: same fluxes computed with an error.
    :param dimension_mesh: number of mesh points (x-axis length).
    :param n_dimension: series truncation order, shown in the title.
    :param error: 1 selects the "sin error" labeling, otherwise "con error".
    """
    error_t = 'sin error ' if error == 1 else 'con error'
    N = range(1, dimension_mesh + 1)  # Only for plotting purposes (x axis)
    for n_flujo in flujos.index:
        # Map the flux row label to its region label 'Reg.<k>'.
        index_reg_actual = "Reg." + n_flujo.split('V')[1]
        fig = plt.Figure()
        ax = fig.add_subplot(111)
        # Extremes over both series (maximo/minimo kept for annotations).
        maximo = np.max((flujos.loc[n_flujo].max(), flujos_err.loc[n_flujo].max()))
        # NOTE(review): 'minimo' uses np.max over the mins and is never read
        # afterwards — looks like leftover code; confirm before relying on it.
        minimo = np.max((flujos.loc[n_flujo].min(), flujos_err.loc[n_flujo].min()))
        fig.suptitle(f"Con nr={n_dimension}- Prueba del flujo {error_t} de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        # ax.text(n_dimension / 8, 0.95 * maximo, """Prueba correcta si se imprime una sola curva.
        #                                            Error si imprime dos curvas""")
        # Plot the error-perturbed flux in red
        ax.plot(N, flujos_err.loc[n_flujo], 'r', label='con error')
        # Plot the exact flux in black
        ax.plot(N, flujos.loc[n_flujo], 'k', label='sin error')
        ax.legend(loc='lower right')
        filename = 'graficas/' + conf.data['env']['path'] + '/flujos/' +\
                   'control ' + error_t + " " + n_flujo + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        # Console progress report
        print(f"* {n_flujo}={recursos_flujo.loc[n_flujo,'calcular_str']}")
def control_de_continuidad(regiones, potenciales, mesh_regiones, n_dimension):
    """Check potential continuity across each region boundary listed in
    ``csv/<env>/continuidad.csv`` and save one comparison PNG per boundary.

    For each (upper, lower) region pair, the bottom row of the upper
    region's potential is plotted against the top row of the lower one;
    vertical bars mark the x-interval [xi, xf] where they must coincide.

    :param regiones: DataFrame with region metadata (unused here, kept for
        signature symmetry with the other plotting helpers).
    :param potenciales: DataFrame of computed potentials (rows 'V1', ...).
    :param mesh_regiones: DataFrame with the flattened x/y mesh per region.
    :param n_dimension: series truncation order, shown in the title.
    """
    continuidad = pd.read_csv('csv/' + conf.data['env']['path'] + '/continuidad.csv')
    for index in continuidad.index:
        fig = plt.figure()
        # Region numbers, e.g. 'R3' -> '3'.
        R_sup = continuidad.loc[index,'region_superior'].split('R')[1]
        R_inf = continuidad.loc[index,'region_inferior'].split('R')[1]
        # Reshape the flattened meshes back to square grids and take the
        # first row of x coordinates of each region.
        X_sup = mesh_regiones.loc['Reg.'+R_sup,'x'].to_numpy()
        X_sup = np.reshape(X_sup, (int(np.sqrt(len(X_sup))),int(np.sqrt(len(X_sup)))))[0]
        X_inf = mesh_regiones.loc['Reg.'+R_inf,'x'].to_numpy()
        X_inf = np.reshape(X_inf, (int(np.sqrt(len(X_inf))),int(np.sqrt(len(X_inf)))))[0]
        # First row of the upper potential vs. last row of the lower one:
        # these are the two sides of the shared boundary.
        pot_superior = potenciales.loc['V'+R_sup].to_numpy()
        pot_superior = np.reshape(pot_superior , (int(np.sqrt(len(pot_superior))),int(np.sqrt(len(pot_superior)))))[0]
        pot_inferior = potenciales.loc['V'+R_inf].to_numpy()
        pot_inferior = np.reshape(pot_inferior, (int(np.sqrt(len(pot_inferior))),int(np.sqrt(len(pot_inferior)))))[-1]
        left_bar = [continuidad.loc[index,'xi'],continuidad.loc[index,'xi']]
        right_bar = [continuidad.loc[index,'xf'],continuidad.loc[index,'xf']]
        plt.title(f"Con nr={n_dimension}- Prueba de continuidad potencial de la Reg.{R_inf} a la Reg.{R_sup}")
        plt.plot(X_sup, pot_superior, 'r')
        plt.plot(X_inf, pot_inferior, 'b')
        # Vertical bars mark where the two curves must coincide.
        plt.plot(left_bar, [-2,2])
        plt.plot(right_bar, [-2,2])
        filename ='graficas/' + conf.data['env']['path'] + '/continuidad de potencial/'+ f'Reg.{R_inf} a la Reg.{R_sup}.png'
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        plt.close()
def graficas_potencial(regiones, potenciales, mesh_regiones, n_dimension):
    """Save a 3D surface plot of the potential of every region.

    One PNG per region is written to ``graficas/<env>/potenciales/surf/``
    (directory must exist). A progress dot is printed per figure.

    :param regiones: DataFrame with region metadata (epsilon per region).
    :param potenciales: DataFrame of computed potentials (rows 'V1', ...).
    :param mesh_regiones: DataFrame with the flattened x/y mesh per region.
    :param n_dimension: series truncation order, shown in the title.
    """
    for n_potencial in potenciales.index:
        index_reg_actual = "Reg." + n_potencial.split('V')[1]
        # Reshape the flattened potential and mesh vectors to square grids.
        pot = potenciales.loc[n_potencial].to_numpy()
        pot = np.reshape(pot, (int(np.sqrt(len(pot))),int(np.sqrt(len(pot)))))
        x_flat = mesh_regiones.loc[index_reg_actual,'x'].to_numpy()
        x_flat = np.reshape(x_flat, (int(np.sqrt(len(x_flat))),int(np.sqrt(len(x_flat)))))
        y_flat = mesh_regiones.loc[index_reg_actual,'y'].to_numpy()
        y_flat = np.reshape(y_flat, (int(np.sqrt(len(y_flat))),int(np.sqrt(len(y_flat)))))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        fig.suptitle(f"Con nr={n_dimension}- surf del potencial de la {index_reg_actual}-{regiones.loc[index_reg_actual, 'eps']}.")
        ax.plot_surface(x_flat,y_flat,pot,cmap=cm.autumn)
        #ax.view_init(0,-90)
        filename = 'graficas/' + conf.data['env']['path'] + '/potenciales/surf/' +\
                   'Surf' + " " + n_potencial + ".png"
        canvas = FigureCanvas(fig)
        canvas.print_figure(filename)
        plt.close()
        print('.', end='')
    print()
def grafica_de_potencial_total(regiones, potenciales, mesh_regiones, n_dimension):
    """Overlay the potential surfaces of every region on a single 3-D figure
    and save it as 'Grafica de Potencial total.png'."""
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plt.title(f"Grafica de los niveles de potencial de todas las regiones")
    for nombre in potenciales.index:
        region = "Reg." + nombre.split('V')[1]

        def _cuadrada(serie):
            # Reshape a flat series into a square grid (side = sqrt(len)).
            lado = int(np.sqrt(len(serie)))
            return np.reshape(serie, (lado, lado))

        potencial = _cuadrada(potenciales.loc[nombre].to_numpy())
        malla_x = _cuadrada(mesh_regiones.loc[region, 'x'].to_numpy())
        malla_y = _cuadrada(mesh_regiones.loc[region, 'y'].to_numpy())
        ax.plot_surface(malla_x, malla_y, potencial, cmap=cm.autumn)
    # Side-on view (elevation 0, azimuth -90) so the potential levels line up.
    ax.view_init(0, -90)
    ruta = 'graficas/' + conf.data['env']['path'] + '/Grafica de Potencial total.png'
    FigureCanvas(fig).print_figure(ruta)
    plt.close()
def draw_rectangle(ax, inicio= 0, ancho= 2,direction = 'y',desp= 2, alto= 3, fill= True):
    """Add a rectangle patch (black edge when filled, red when hollow) to *ax*
    and project it onto the 3-D plane at offset *desp* along *direction*."""
    rect = Rectangle((inicio, 0), width=ancho, height=alto, fill=fill)
    rect.set_edgecolor('k' if fill else 'r')
    ax.add_patch(rect)
    art3d.pathpatch_2d_to_3d(rect, z=desp, zdir=direction)
def draw_text(ax, x, y, z=1, cadena=''):
    """Render *cadena* as a filled text path at (x, y), lifted to height *z*
    along the z axis of a 3-D plot."""
    contorno = TextPath((0, 0), cadena, size=.35)
    desplazado = Affine2D().translate(x, y).transform_path(contorno)
    parche = PathPatch(desplazado, fc='k')
    ax.add_patch(parche)
    art3d.pathpatch_2d_to_3d(parche, z=z, zdir='z')
def draw_region3d(ax, xi, xf, yi, yf, fronteras, n_region, material, z=1, xmax=None):
    """Draw one region of the plane problem on a 3-D axis.

    Writes the region label, then for each boundary side ('arriba'/'abajo'/
    'derecha'/'izquierda') draws a filled wall for Dirichlet values
    ('Uno'/'Cero'), a hollow wall for 'no'/'SIM', or parses a compound
    boundary string "VAL-start-end/..." and mirrors it left and right of the
    symmetry axis (needs *xmax*).
    """
    # Region label: place it slightly inside the left edge of the region.
    x_texto = xi if xi<xf else xf
    desp_t = abs(xf-xi)*.2 if abs(xf-xi)==1 else abs(xf-xi)*.4
    draw_text(ax,x_texto+desp_t,yi + (yf-yi)*.3,z,f'R{n_region} {material}')
    for lugar, valor in fronteras.items():
        # Anchor point for the boundary-value label, per side.
        # NOTE(review): x_t/y_t stay unbound if *lugar* is none of the four
        # known sides — assumes the fronteras table only uses those keys.
        if lugar=='arriba':
            x_t = xi+(xf-xi)/2
            y_t = yf
        elif lugar=='abajo':
            x_t = xi+(xf-xi)/2
            y_t = yi
        elif lugar=='derecha':
            x_t = xf
            y_t = yi+(yf-yi)/2
        elif lugar=='izquierda':
            x_t = xi
            y_t = yi+(yf-yi)/2
        if valor == 'Uno': texto = f'V{n_region}=1'
        elif valor == 'Cero': texto = f'V{n_region}=0'
        elif valor == 'SIM': texto = 'SIM'
        if valor in ['Uno','Cero','SIM']: draw_text(ax,x_t,y_t,z,texto)
        # Wall geometry: horizontal sides run along x, vertical sides along y.
        direccion = 'y' if lugar=='arriba' or lugar=='abajo' else 'x'
        punto_inicial = xi if lugar=='arriba' or lugar=='abajo' else yi
        ancho = (xf-xi) if lugar=='arriba' or lugar=='abajo' else (yf-yi)
        if lugar=='arriba': desp=yf
        elif lugar=='abajo': desp=yi
        elif lugar=='derecha': desp=xf
        elif lugar=='izquierda': desp=xi
        if valor == 'Uno' or valor == 'Cero':
            # Dirichlet boundary: solid wall.
            draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z)
        elif valor=='no' or valor=='SIM':
            # Open/symmetry boundary: hollow (red-edged) wall.
            draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z,fill=False)
        else:
            # Compound boundary: "VAL-start-end" segments separated by '/'.
            front_list = [x.split('-') for x in valor.split('/')]
            for pseudo_frontera in front_list:
                punto_inicial = int(pseudo_frontera[1])
                ancho = (int(pseudo_frontera[2])-int(pseudo_frontera[1]))
                if pseudo_frontera[0] == 'Uno' or pseudo_frontera[0] == 'Cero':
                    # Left mirror image about x = 0.
                    draw_rectangle(ax, inicio= -punto_inicial, ancho= -ancho, direction= direccion, desp= desp, alto= z)
                    # Central (actual) segment.
                    draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z)
                    # Right mirror image about x = xmax.
                    draw_rectangle(ax, inicio= -punto_inicial+2*xmax, ancho= -ancho, direction= direccion, desp= desp, alto= z)
                elif pseudo_frontera[0]=='no' or pseudo_frontera[0]=='SIM':
                    # Same three copies, drawn hollow.
                    draw_rectangle(ax, inicio= -punto_inicial, ancho= -ancho, direction= direccion, desp= desp, alto= z,fill=False)
                    draw_rectangle(ax, inicio= punto_inicial, ancho= ancho, direction= direccion, desp= desp, alto= z,fill=False)
                    draw_rectangle(ax, inicio= -punto_inicial+2*xmax, ancho= -ancho, direction= direccion, desp= desp, alto= z,fill=False)
def graficar_problema_plano_3D(regiones,z=2):
    """Draw the whole plane problem in 3-D with its two symmetry copies
    (mirrored left about x=0 and right about x=xmax) and save the figure
    as 'Problema Plano 3D.png'. *z* is the wall height."""
    fronteras = pd.read_csv('csv/' + conf.data['env']['path'] + '/fronteras.csv')
    xmax, ymax = max(regiones['xf']), max(regiones['yf'])
    fig = plt.figure()
    fig.suptitle('Grafica tridimensional del problema plano con 2 simetrias')
    ax = fig.add_subplot(111, projection='3d')
    filename = 'graficas/' + conf.data['env']['path'] + "/Problema Plano 3D.png"
    for i,region in enumerate(regiones.index):
        xi, xf = regiones.loc[region,'xi'], regiones.loc[region,'xf']
        yi, yf = regiones.loc[region,'yi'], regiones.loc[region,'yf']
        # Left mirror image (about x = 0).
        draw_region3d(ax,-xi,-xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
        # Central (actual) region.
        draw_region3d(ax,xi,xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
        # Right mirror image (about x = xmax).
        draw_region3d(ax,-xi+2*xmax,-xf+2*xmax,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],z,xmax)
    ax.set_xlim(-xmax, 2*xmax)
    ax.set_ylim(0, ymax)
    ax.set_zlim(0, z+2)
    #ax.view_init(60,-60)
    ax.view_init(80,-70)
    fig.set_size_inches(14,8)
    canvas = FigureCanvas(fig)
    canvas.print_figure(filename)
def draw_region(ax, xi, xf, yi, yf, fronteras,n_region,material, xmax,sim='der'):
    """Draw one region of the plane problem on a 2-D axis.

    Dirichlet boundaries ('Uno'/'Cero') are thick black lines, 'SIM'/'no'
    boundaries thin red lines; compound boundaries "VAL-start-end/..." are
    drawn per segment, mirrored according to *sim* ('izq', 'der' or None
    for the central copy).
    """
    # Region label, placed slightly inside the left edge.
    x_texto = xi if xi<xf else xf
    desp_t = abs(xf-xi)*.2 if abs(xf-xi)==1 else abs(xf-xi)*.4
    ax.annotate(f'Reg{n_region}\n{material}',(x_texto+desp_t,yi + (yf-yi)*.3))
    for lugar, valor in fronteras.items():
        # Label anchor and rotation per side.
        # NOTE(review): angulo/x_t/y_t stay unbound if *lugar* is not one of
        # the four known sides — assumes the fronteras table only uses those.
        if lugar=='arriba':
            angulo = 0
            x_t = xi+(xf-xi)*.2 if sim ==None else xi+(xf-xi)*.8
            y_t = yf
        elif lugar=='abajo':
            angulo = 0
            x_t = xi+(xf-xi)*.2 if sim ==None else xi+(xf-xi)*.8
            y_t = yi
        elif lugar=='derecha':
            angulo = 90
            x_t = xf
            y_t = yi+ (yf-yi)*.1
        elif lugar=='izquierda':
            angulo = 90
            x_t = xi
            y_t = yi+ (yf-yi)*.1
        if valor == 'Uno': texto = f'V{n_region}=1'
        elif valor == 'Cero': texto = f'V{n_region}=0'
        elif valor == 'SIM': texto = 'SIM'
        if valor in ['Uno','Cero','SIM']: ax.annotate(texto,(x_t,y_t),rotation=angulo)
        if valor == 'Uno' or valor == 'Cero':
            # Dirichlet boundary: thick black line along the side.
            if lugar == 'arriba': ax.plot([xi,xf],[yf,yf],'k',lw=3)
            if lugar == 'abajo': ax.plot([xi,xf],[yi,yi],'k',lw=3)
            if lugar == 'derecha': ax.plot([xf,xf],[yi,yf],'k',lw=3)
            if lugar == 'izquierda': ax.plot([xi,xi],[yi,yf],'k',lw=3)
        elif valor == 'SIM' or valor == 'no':
            # Symmetry/open boundary: thin red line along the side.
            if lugar == 'arriba': ax.plot([xi,xf],[yf,yf],'r',lw=2)
            if lugar == 'abajo': ax.plot([xi,xf],[yi,yi],'r',lw=2)
            if lugar == 'derecha': ax.plot([xf,xf],[yi,yf],'r',lw=2)
            if lugar == 'izquierda': ax.plot([xi,xi],[yi,yf],'r',lw=2)
        else:
            # Compound boundary: "VAL-start-end" segments separated by '/'.
            # NOTE(review): only horizontal sides ('arriba'/'abajo') are
            # handled here; vertical compound boundaries are silently skipped.
            front_list = [x.split('-') for x in valor.split('/')]
            for pseudo_frontera in front_list:
                pi = int(pseudo_frontera[1])
                pf = int(pseudo_frontera[2])
                # NOTE(review): color/ancho stay unbound for an unrecognized
                # segment kind — assumes only Uno/Cero/SIM/no appear.
                if pseudo_frontera[0] == 'Uno' or pseudo_frontera[0] == 'Cero':
                    color ='k'
                    ancho = 3
                elif pseudo_frontera[0] == 'SIM' or pseudo_frontera[0] == 'no':
                    color ='r'
                    ancho = 2
                if lugar == 'arriba':
                    if sim == 'izq': ax.plot([-pi,-pf],[yf,yf],color,lw=ancho)
                    ax.plot([pi,pf],[yf,yf],color,lw=ancho)
                    if sim == 'der':ax.plot([-pi+2*xmax,-pf+2*xmax],[yf,yf],color,lw=ancho)
                if lugar == 'abajo':
                    if sim == 'izq': ax.plot([-pi,-pf],[yi,yi],color,lw=ancho)
                    ax.plot([pi,pf],[yi,yi],color,lw=ancho)
                    if sim == 'der': ax.plot([-pi+2*xmax,-pf+2*xmax],[yi,yi],color,lw=ancho)
def graficar_problema_plano_2D(regiones):
    """Draw the whole plane problem in 2-D with its two symmetry copies
    (mirrored left about x=0 and right about x=xmax) and save the figure
    as 'Problema Plano 2D.png'."""
    fronteras = pd.read_csv('csv/' + conf.data['env']['path'] + '/fronteras.csv')
    xmax, ymax = max(regiones['xf']), max(regiones['yf'])
    fig, ax= plt.subplots()
    fig.suptitle('Grafica bidimensional del problema plano con 2 simetrias')
    filename = 'graficas/' + conf.data['env']['path'] + "/Problema Plano 2D.png"
    for i,region in enumerate(regiones.index):
        xi, xf = regiones.loc[region,'xi'], regiones.loc[region,'xf']
        yi, yf = regiones.loc[region,'yi'], regiones.loc[region,'yf']
        # Left mirror image (about x = 0).
        draw_region(ax,-xi,-xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim='izq')
        # Central (actual) region.
        draw_region(ax,xi,xf,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim=None)
        # Right mirror image (about x = xmax).
        draw_region(ax,-xi+2*xmax,-xf+2*xmax,yi,yf,fronteras.loc[i],i+1,regiones.loc[region, 'eps'],xmax,sim='der')
    # Integer ticks over the mirrored extent; assumes xmax/ymax are integers.
    ax.set_xticks(range(xmax+1))
    ax.set_yticks(range(ymax+1))
    ax.grid()
    fig.set_size_inches(14,8)
    canvas = FigureCanvas(fig)
    canvas.print_figure(filename)
| 2.40625 | 2 |
.modules/.sqlmap/lib/takeover/abstraction.py | termux-one/EasY_HaCk | 1,103 | 12758531 | <reponame>termux-one/EasY_HaCk
#!/usr/bin/env python
"""
Copyright (c) 2006-2018 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import sys
from extra.safe2bin.safe2bin import safechardecode
from lib.core.common import dataToStdout
from lib.core.common import Backend
from lib.core.common import getSQLSnippet
from lib.core.common import getUnicode
from lib.core.common import isStackingAvailable
from lib.core.common import readInput
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.enums import DBMS
from lib.core.enums import OS
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.core.shell import autoCompletion
from lib.request import inject
from lib.takeover.udf import UDF
from lib.takeover.web import Web
from lib.takeover.xp_cmdshell import XP_cmdshell
class Abstraction(Web, UDF, XP_cmdshell):
    """
    This class defines an abstraction layer for OS takeover functionalities
    to UDF / XP_cmdshell objects
    """
    def __init__(self):
        # Whether the takeover environment (UDFs / xp_cmdshell / web shell)
        # has already been prepared on the target.
        self.envInitialized = False
        # When True, skip the per-command "retrieve output?" prompt.
        self.alwaysRetrieveCmdOutput = False
        UDF.__init__(self)
        Web.__init__(self)
        XP_cmdshell.__init__(self)
    def execCmd(self, cmd, silent=False):
        """Execute *cmd* on the target OS without collecting its output,
        dispatching to the web backdoor, UDFs or xp_cmdshell per DBMS."""
        # Web backdoor takes precedence when stacked queries are unavailable.
        if self.webBackdoorUrl and not isStackingAvailable():
            self.webBackdoorRunCmd(cmd)
        elif Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
            self.udfExecCmd(cmd, silent=silent)
        elif Backend.isDbms(DBMS.MSSQL):
            self.xpCmdshellExecCmd(cmd, silent=silent)
        else:
            errMsg = "Feature not yet implemented for the back-end DBMS"
            raise SqlmapUnsupportedFeatureException(errMsg)
    def evalCmd(self, cmd, first=None, last=None):
        """Execute *cmd* on the target OS and return its (decoded) output,
        optionally sliced between the *first* and *last* lines."""
        retVal = None
        if self.webBackdoorUrl and not isStackingAvailable():
            retVal = self.webBackdoorRunCmd(cmd)
        elif Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
            retVal = self.udfEvalCmd(cmd, first, last)
        elif Backend.isDbms(DBMS.MSSQL):
            retVal = self.xpCmdshellEvalCmd(cmd, first, last)
        else:
            errMsg = "Feature not yet implemented for the back-end DBMS"
            raise SqlmapUnsupportedFeatureException(errMsg)
        return safechardecode(retVal)
    def runCmd(self, cmd):
        """Run *cmd*, asking the user whether to retrieve its standard output
        ('a' answers yes for all subsequent commands)."""
        choice = None
        if not self.alwaysRetrieveCmdOutput:
            message = "do you want to retrieve the command standard "
            message += "output? [Y/n/a] "
            choice = readInput(message, default='Y').upper()
            if choice == 'A':
                self.alwaysRetrieveCmdOutput = True
        if choice == 'Y' or self.alwaysRetrieveCmdOutput:
            output = self.evalCmd(cmd)
            if output:
                conf.dumper.string("command standard output", output)
            else:
                dataToStdout("No output\n")
        else:
            self.execCmd(cmd)
    def shell(self):
        """Interactive OS shell loop; reads commands until 'x'/'q'/EOF."""
        if self.webBackdoorUrl and not isStackingAvailable():
            infoMsg = "calling OS shell. To quit type "
            infoMsg += "'x' or 'q' and press ENTER"
            logger.info(infoMsg)
        else:
            if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
                infoMsg = "going to use injected sys_eval and sys_exec "
                infoMsg += "user-defined functions for operating system "
                infoMsg += "command execution"
                logger.info(infoMsg)
            elif Backend.isDbms(DBMS.MSSQL):
                infoMsg = "going to use xp_cmdshell extended procedure for "
                infoMsg += "operating system command execution"
                logger.info(infoMsg)
            else:
                errMsg = "feature not yet implemented for the back-end DBMS"
                raise SqlmapUnsupportedFeatureException(errMsg)
            infoMsg = "calling %s OS shell. To quit type " % (Backend.getOs() or "Windows")
            infoMsg += "'x' or 'q' and press ENTER"
            logger.info(infoMsg)
        # Enable OS-appropriate tab completion for the prompt.
        autoCompletion(AUTOCOMPLETE_TYPE.OS, OS.WINDOWS if Backend.isOs(OS.WINDOWS) else OS.LINUX)
        while True:
            command = None
            try:
                # Python 2 input primitive (this module predates Python 3).
                command = raw_input("os-shell> ")
                command = getUnicode(command, encoding=sys.stdin.encoding)
            except KeyboardInterrupt:
                # Ctrl-C aborts the current command but keeps the shell open.
                print
                errMsg = "user aborted"
                logger.error(errMsg)
            except EOFError:
                # Ctrl-D exits the shell loop.
                print
                errMsg = "exit"
                logger.error(errMsg)
                break
            if not command:
                continue
            if command.lower() in ("x", "q", "exit", "quit"):
                break
            self.runCmd(command)
    def _initRunAs(self):
        """Prepare run-as-another-DBMS-user support when --dbms-cred is set
        (currently only enables OPENROWSET on Microsoft SQL Server)."""
        if not conf.dbmsCred:
            return
        if not conf.direct and not isStackingAvailable():
            errMsg = "stacked queries are not supported hence sqlmap cannot "
            errMsg += "execute statements as another user. The execution "
            errMsg += "will continue and the DBMS credentials provided "
            errMsg += "will simply be ignored"
            logger.error(errMsg)
            return
        if Backend.isDbms(DBMS.MSSQL):
            msg = "on Microsoft SQL Server 2005 and 2008, OPENROWSET function "
            msg += "is disabled by default. This function is needed to execute "
            msg += "statements as another DBMS user since you provided the "
            msg += "option '--dbms-creds'. If you are DBA, you can enable it. "
            msg += "Do you want to enable it? [Y/n] "
            if readInput(msg, default='Y', boolean=True):
                expression = getSQLSnippet(DBMS.MSSQL, "configure_openrowset", ENABLE="1")
                inject.goStacked(expression)
        # TODO: add support for PostgreSQL
        # elif Backend.isDbms(DBMS.PGSQL):
        #     expression = getSQLSnippet(DBMS.PGSQL, "configure_dblink", ENABLE="1")
        #     inject.goStacked(expression)
    def initEnv(self, mandatory=True, detailed=False, web=False, forceInit=False):
        """Initialize the takeover environment (web shell, or UDF injection /
        xp_cmdshell per DBMS). Idempotent unless *forceInit* is True."""
        self._initRunAs()
        if self.envInitialized and not forceInit:
            return
        if web:
            self.webInit()
        else:
            self.checkDbmsOs(detailed)
            if mandatory and not self.isDba():
                warnMsg = "functionality requested probably does not work because "
                warnMsg += "the current session user is not a database administrator"
                if not conf.dbmsCred and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.PGSQL):
                    warnMsg += ". You can try to use option '--dbms-cred' "
                    warnMsg += "to execute statements as a DBA user if you "
                    warnMsg += "were able to extract and crack a DBA "
                    warnMsg += "password by any mean"
                logger.warn(warnMsg)
            if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.PGSQL):
                success = self.udfInjectSys()
                if success is not True:
                    msg = "unable to mount the operating system takeover"
                    raise SqlmapFilePathException(msg)
            elif Backend.isDbms(DBMS.MSSQL):
                if mandatory:
                    self.xpCmdshellInit()
            else:
                errMsg = "feature not yet implemented for the back-end DBMS"
                raise SqlmapUnsupportedFeatureException(errMsg)
        self.envInitialized = True
| 1.703125 | 2 |
2020-08-28 - aula02 - desenvolvimento hello world/csv_test.py | gustavospiess/bcc_2020_2_prjsft2 | 0 | 12758532 | import csv
def raw_data_gen(n):
    '''
    Yield *n* rows of mock CSV data.

    Each yielded item is itself a lazy generator producing the four
    strings "i_0" .. "i_3" for row index i.
    '''
    row = 0
    while row < n:
        yield (f'{row}_{col}' for col in range(4))
        row += 1
# Create/overwrite data_file.csv in the working directory with 5 mock rows.
with open('data_file.csv', 'w', newline='') as data_buffer:
    file_writer = csv.writer(data_buffer)
    file_writer.writerows(raw_data_gen(5))
# Read the same file back and print each parsed row as a list of strings.
with open('data_file.csv', 'r', newline='') as data_buffer:
    file_reader = csv.reader(data_buffer)
    for row in file_reader:
        print(row)
Playfair_Keygen/playfair_keygen.py | Positron11/Simulated-Annealing-Playfair-Cipher-Breaker | 0 | 12758533 | <filename>Playfair_Keygen/playfair_keygen.py<gh_stars>0
from copy import deepcopy
from numpy.random import rand
from random import shuffle, randint
# random playfair cipher key generator
def generate_key(ciphertext: str = "") -> list:
    """Generate a random 5x5 Playfair key square.

    Since Playfair squares hold only 25 cells, one low-frequency letter is
    dropped: the first of 'z', 'q', 'j' that does not occur in *ciphertext*
    (falling back to 'q' when all three occur).

    Note: the previous default of a mutable ``[]`` (annotated as str) was
    replaced with ``""`` — behavior for all callers is unchanged.
    """
    alphabet = [chr(code) for code in range(97, 123)]
    # Prefer to remove a letter that the ciphertext cannot contain.
    low_frequency_letters = [letter for letter in ("z", "q", "j") if letter not in ciphertext]
    alphabet.remove(low_frequency_letters[0] if low_frequency_letters else "q")
    shuffle(alphabet)
    # Split the shuffled 25 letters into 5 rows of 5.
    return [alphabet[5 * i:5 * i + 5] for i in range(5)]
# convert 2d key to 1d list
def linearize_key(key:list) -> list:
return [row[i] for row in key for i in range(len(key))]
# shuffle playfair cipher key
def shuffle_key(key:list, mode:str="", shuffles:int=1) -> list:
# create deepcopy of key
new_key = deepcopy(key)
# for number of shuffles
for shuffle in range(shuffles):
# get random value between 0 and 1
selector = rand()
# reverse key
if (not mode and 0.98 <= selector < 1.00) or mode == "rwk":
new_key.reverse()
for row in new_key:
row.reverse()
# flip all columns
if (not mode and 0.96 <= selector < 0.98) or mode == "rac":
new_key.reverse()
# flip all rows
if (not mode and 0.94 <= selector < 0.96) or mode == "rar":
for row in new_key:
row.reverse()
# flip random column
if (not mode and 0.92 <= selector < 0.94) or mode == "rrc":
# get random column
column_index = randint(0,4)
# construct and reverse column list
column = [row[column_index] for row in new_key]
column.reverse()
# substitute reversed column values into key
for row in new_key:
row[column_index] = column[new_key.index(row)]
# flip random row
if (not mode and 0.90 <= selector < 0.92) or mode == "rrr":
new_key[randint(0,4)].reverse()
# swap two random letters
if (not mode and 0 <= selector < 0.90) or mode == "swp":
# initialize index variables
x, y, i, j = 0, 0, 0, 0
# get random indices, make sure distinct
while x == i and y == j:
x, y, i, j = randint(0,4), randint(0,4), randint(0,4), randint(0,4)
# swap values
new_key[x][y], new_key[i][j] = new_key[i][j], new_key[x][y]
return new_key | 3.03125 | 3 |
old_notebooks/write_reference_qpaplc.py | neilzim/SCDA | 3 | 12758534 | <gh_stars>1-10
#!/usr/bin/env python3
"""
Test the functionaility of the core SCDA
02/14/2016 -- created by NTZ
"""
import scda
import numpy as np
import os
import sys
if __name__ == "__main__":
    # Send SCDA log output to a file next to the results.
    scda.configure_log("wrapper_test.log")
    test_dir = "test_scda_aplc" # nominal destination for new AMPL programs
    #aux_dir = "~/SCDA/2d AMPL script - quarter pupil"
    aux_dir = "../2d AMPL script - quarter pupil"
    # File organization: where to read the telescope aperture (TelAp),
    # focal-plane mask (FPM) and Lyot stop (LS) arrays, and where to write.
    fileorg = {'work dir': test_dir, 'TelAp dir': aux_dir, 'FPM dir': aux_dir, 'LS dir': aux_dir,
               'TelAp fname': "CircPupil_N=0300_obs=20_center_quarter_spiders3=01_gaps=01.dat",
               'FPM fname': "CircPupil_N=0050_obs=00_center_quarter.dat",
               'LS fname': "CircPupil_N=0300_obs=40_center_quarter_spiders3=02.dat"}
    # N: pupil array size (quarter-plane sampling).
    pupil_params = {'N': 300}
    # fpm_params = {'rad': 9.898/2, 'M':50}
    # fpm_params = {'rad': 6.466/2, 'M':50}
    # rad: focal-plane mask radius; M: mask array size.
    fpm_params = {'rad': 8./2, 'M':50}
    # ls_params = {'id': 10, 'od': 0.9}
    ls_params = {}
    # c: contrast (10^-c); iwa/owa: inner/outer working angle; bw/Nlam: bandwidth.
    image_params = {'c': 10., 'iwa':3.5, 'owa':7., 'bw':0., 'Nlam':1}
    design_params = {'Pupil': pupil_params, 'FPM': fpm_params, 'LS': ls_params, 'Image': image_params}
    # solver_params = {'method': 'bar', 'presolve': False, 'Nthreads': 8}
    solver_params = {}
    # Build the quarter-plane APLC design and emit the reference AMPL program.
    atlast_coron = scda.QuarterplaneAPLC(fileorg=fileorg, design=design_params, solver=solver_params, verbose=True)
    atlast_coron.write_ampl(ampl_src_fname="ref_qpaplc_master.mod", overwrite=True)
| 1.882813 | 2 |
wfexs_backend/docker_container.py | inab/WES-backend | 0 | 12758535 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2022 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import lzma
import os
import shutil
import subprocess
import tempfile
from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Union
from typing import cast
import uuid
from .common import AbsPath, RelPath, URIType
from .common import Container, ContainerType
from .common import ContainerFileNamingMethod, ContainerTaggedName
from .common import DEFAULT_DOCKER_CMD
from .container import ContainerFactory, ContainerFactoryException
from .utils.contents import link_or_copy
from .utils.digests import ComputeDigestFromFile, ComputeDigestFromObject, nihDigester
DOCKER_PROTO = 'docker://'
class DockerContainerFactory(ContainerFactory):
    """Materializes Docker container images for workflow execution.

    Images are pulled with the local docker client, snapshotted as an
    xz-compressed ``docker save`` tarball in the cache (for reproducibility),
    and described by a JSON metadata sidecar holding the image and manifest
    digests.
    """

    def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
        """Initialize cache layout via the base class and resolve the docker CLI command."""
        super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
        self.runtime_cmd = local_config.get('tools', {}).get('dockerCommand', DEFAULT_DOCKER_CMD)

    @classmethod
    def ContainerType(cls) -> ContainerType:
        return ContainerType.Docker

    def _inspect(self, dockerTag : ContainerTaggedName, matEnv: Mapping[str,str]) -> Tuple[int, str, str]:
        """Run ``docker inspect`` on *dockerTag*; return (retval, stdout, stderr)."""
        with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
            self.logger.debug(f"querying docker container {dockerTag}")
            d_retval = subprocess.Popen(
                [self.runtime_cmd, 'inspect', dockerTag],
                env=matEnv,
                stdout=d_out,
                stderr=d_err
            ).wait()

            self.logger.debug(f"docker inspect {dockerTag} retval: {d_retval}")

            with open(d_out.name, mode="rb") as c_stF:
                # FIX: errors='continue' is not a valid codecs error handler and
                # would raise LookupError on the first undecodable byte.
                d_out_v = c_stF.read().decode('utf-8', errors='ignore')
            with open(d_err.name, mode="rb") as c_stF:
                d_err_v = c_stF.read().decode('utf-8', errors='ignore')

            self.logger.debug(f"docker inspect stdout: {d_out_v}")

            self.logger.debug(f"docker inspect stderr: {d_err_v}")

            return d_retval , d_out_v , d_err_v

    def _pull(self, dockerTag : ContainerTaggedName, matEnv: Mapping[str,str]) -> Tuple[int, str, str]:
        """Run ``docker pull`` on *dockerTag*; return (retval, stdout, stderr)."""
        with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
            self.logger.debug(f"pulling docker container {dockerTag}")
            d_retval = subprocess.Popen(
                [self.runtime_cmd, 'pull', dockerTag],
                env=matEnv,
                stdout=d_out,
                stderr=d_err
            ).wait()

            self.logger.debug(f"docker pull {dockerTag} retval: {d_retval}")

            with open(d_out.name, mode="r") as c_stF:
                d_out_v = c_stF.read()
            with open(d_err.name,"r") as c_stF:
                d_err_v = c_stF.read()

            self.logger.debug(f"docker pull stdout: {d_out_v}")

            self.logger.debug(f"docker pull stderr: {d_err_v}")

            return d_retval , d_out_v , d_err_v

    def _save(self, dockerTag: ContainerTaggedName, destfile: AbsPath, matEnv: Mapping[str,str]) -> Tuple[int, str]:
        """Stream ``docker save`` of *dockerTag* into xz-compressed *destfile*;
        return (retval, stderr)."""
        with lzma.open(destfile, mode='wb') as d_out, tempfile.NamedTemporaryFile() as d_err:
            self.logger.debug(f"saving docker container {dockerTag}")
            with subprocess.Popen(
                [self.runtime_cmd, 'save', dockerTag],
                env=matEnv,
                stdout=subprocess.PIPE,
                stderr=d_err
            ) as sp:
                if sp.stdout is not None:
                    # Compress the tar stream on the fly through lzma.
                    shutil.copyfileobj(sp.stdout, d_out)
                d_retval = sp.wait()

            self.logger.debug(f"docker save {dockerTag} retval: {d_retval}")

            with open(d_err.name, "r") as c_stF:
                d_err_v = c_stF.read()

            self.logger.debug(f"docker save stderr: {d_err_v}")

            return d_retval , d_err_v

    def materializeContainers(self, tagList: Sequence[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, containers_dir: Optional[Union[RelPath, AbsPath]] = None, offline: bool = False) -> Sequence[Container]:
        """
        It is assured the containers are materialized

        For each tag: inspect (pulling first if absent), snapshot the image to
        the content-addressed cache with a JSON metadata sidecar, symlink it
        into the engine dir, optionally hardlink/copy into *containers_dir*,
        and return the resulting :class:`Container` records.
        """
        containersList = []

        matEnv = dict(os.environ)
        matEnv.update(self.environment)
        for tag in tagList:
            # It is an absolute URL, we are removing the docker://
            dockerTag = cast(ContainerTaggedName, tag[len(DOCKER_PROTO):] if tag.startswith(DOCKER_PROTO) else tag)

            self.logger.info(f"downloading docker container: {tag}")
            d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)

            # Time to pull the image
            if d_retval != 0:
                d_retval , d_out_v , d_err_v = self._pull(dockerTag, matEnv)
                if d_retval == 0:
                    # Second try
                    d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)

            if d_retval != 0:
                errstr = """Could not materialize docker image {}. Retval {}
======
STDOUT
======
{}

======
STDERR
======
{}""".format(dockerTag, d_retval, d_out_v, d_err_v)
                raise ContainerFactoryException(errstr)

            # Parsing the output from docker inspect
            try:
                manifests = json.loads(d_out_v)
                manifest = manifests[0]
            except Exception as e:
                raise ContainerFactoryException(f"FATAL ERROR: Docker finished properly but it did not properly materialize {tag}: {e}")

            # Then, compute the signature
            tagId = manifest['Id']
            fingerprint = None
            if len(manifest['RepoDigests']) > 0:
                fingerprint = manifest['RepoDigests'][0]

            # Last but one, let's save a copy of the container locally
            containerFilename = simpleFileNameMethod(cast(URIType, tag))
            containerFilenameMeta = containerFilename + self.META_JSON_POSTFIX
            localContainerPath = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilename))
            localContainerPathMeta = cast(AbsPath, os.path.join(self.engineContainersSymlinkDir, containerFilenameMeta))

            self.logger.info("saving docker container (for reproducibility matters): {} => {}".format(tag, localContainerPath))
            # First, let's materialize the container image
            manifestsImageSignature = ComputeDigestFromObject(manifests)
            canonicalContainerPath = os.path.join(self.containersCacheDir, manifestsImageSignature.replace('=','~').replace('/','-').replace('+','_'))
            canonicalContainerPathMeta = canonicalContainerPath + self.META_JSON_POSTFIX

            # Defining the destinations; trust the cached copy only when both
            # the stored and recomputed manifest signatures agree.
            if os.path.isfile(canonicalContainerPathMeta):
                with open(canonicalContainerPathMeta, mode="r", encoding="utf-8") as tcpm:
                    metadataLocal = json.load(tcpm)

                manifestsImageSignatureLocal = metadataLocal.get('manifests_signature')
                manifestsImageSignatureLocalRead = ComputeDigestFromObject(metadataLocal.get('manifests', []))
                if manifestsImageSignature != manifestsImageSignatureLocal or manifestsImageSignature != manifestsImageSignatureLocalRead:
                    # FIX: the warning was a plain string; {tag} never interpolated.
                    self.logger.warning(f"Corrupted canonical container metadata {tag}. Re-saving")
                    saveContainerPathMeta = True
                    imageSignatureLocal = None
                else:
                    saveContainerPathMeta = False
                    imageSignatureLocal = metadataLocal.get('image_signature')
            else:
                saveContainerPathMeta = True
                imageSignature = None
                imageSignatureLocal = None

            # Only trust when they match
            tmpContainerPath: Optional[str] = os.path.join(self.containersCacheDir,str(uuid.uuid4()))
            if os.path.isfile(canonicalContainerPath) and (imageSignatureLocal is not None):
                imageSignatureLocalRead = ComputeDigestFromFile(canonicalContainerPath)
                if imageSignatureLocalRead != imageSignatureLocal:
                    # FIX: missing f-prefix (see above).
                    self.logger.warning(f"Corrupted canonical container {tag}. Re-saving")
                else:
                    imageSignature = imageSignatureLocal
                    tmpContainerPath = None

            if tmpContainerPath is not None:
                saveContainerPathMeta = True
                # FIX: the stderr of docker save was bound to a typo (d_err_ev),
                # so the log line and the error message below reported stale
                # stderr from the earlier inspect/pull.
                d_retval, d_err_v = self._save(dockerTag, cast(AbsPath, tmpContainerPath), matEnv)
                self.logger.debug("docker save retval: {}".format(d_retval))
                self.logger.debug("docker save stderr: {}".format(d_err_v))

                if d_retval != 0:
                    errstr = """Could not save docker image {}. Retval {}
======
STDERR
======
{}""".format(dockerTag, d_retval, d_err_v)

                    # Removing partial dumps
                    if os.path.exists(tmpContainerPath):
                        try:
                            os.unlink(tmpContainerPath)
                        except:
                            pass
                    raise ContainerFactoryException(errstr)

                shutil.move(tmpContainerPath, canonicalContainerPath)
                imageSignature = ComputeDigestFromFile(canonicalContainerPath)

            if saveContainerPathMeta:
                with open(canonicalContainerPathMeta, mode="w", encoding='utf-8') as tcpM:
                    json.dump({
                        "image_signature": imageSignature,
                        "manifests_signature": manifestsImageSignature,
                        "manifests": manifests
                    }, tcpM)

            # Now, check the relative symbolic link of image
            createSymlink = True
            if os.path.lexists(localContainerPath):
                if os.path.realpath(localContainerPath) != os.path.realpath(canonicalContainerPath):
                    os.unlink(localContainerPath)
                else:
                    createSymlink = False
            if createSymlink:
                os.symlink(os.path.relpath(canonicalContainerPath, self.engineContainersSymlinkDir), localContainerPath)

            # Now, check the relative symbolic link of metadata
            createSymlink = True
            if os.path.lexists(localContainerPathMeta):
                if os.path.realpath(localContainerPathMeta) != os.path.realpath(canonicalContainerPathMeta):
                    os.unlink(localContainerPathMeta)
                else:
                    createSymlink = False
            if createSymlink:
                os.symlink(os.path.relpath(canonicalContainerPathMeta, self.engineContainersSymlinkDir), localContainerPathMeta)

            # Last, hardlink or copy the container and its metadata
            if containers_dir is not None:
                containerPath = cast(AbsPath, os.path.join(containers_dir, containerFilename))
                containerPathMeta = cast(AbsPath, os.path.join(containers_dir, containerFilenameMeta))

                # Do not allow overwriting in offline mode
                if not offline or not os.path.exists(containerPath):
                    link_or_copy(localContainerPath, containerPath)
                if not offline or not os.path.exists(containerPathMeta):
                    link_or_copy(localContainerPathMeta, containerPathMeta)
            else:
                containerPath = localContainerPath

            # And add to the list of containers
            containersList.append(
                Container(
                    origTaggedName=tag,
                    taggedName=cast(URIType, dockerTag),
                    signature=tagId,
                    fingerprint=fingerprint,
                    type=self.containerType,
                    localPath=containerPath
                )
            )

        return containersList
| 1.828125 | 2 |
main.py | Nathpett/cryptography | 0 | 12758536 | from math import sqrt, ceil
# --- module-wide constants ---
# Uppercase Latin alphabet used by the letter-substitution ciphers.
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# 77-character working alphabet for the index-difference cipher.
REGION = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;-?! \'()$%&"'
#helpers
def has_numbers(text):
    """Return True if *text* contains at least one decimal digit character."""
    return any(c.isdigit() for c in text)
# === SUBSTITUTION CIPHERS ===
def encode_polybius(text):
    """Encode *text* with a 5x5 Polybius square over the modern Latin
    alphabet (I and J share cell 24). Letters become two-digit row/column
    pairs; other characters pass through. Digits in the input are rejected."""
    # Inlined digit check (was a helper call): encoded digits would be ambiguous.
    if any(ch.isdigit() for ch in text):
        raise ValueError("text should not have digit characters")
    square = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    pieces = []
    for ch in text.upper():
        if ch == "J":
            pieces.append("24")
        elif ch in square:
            pos = square.index(ch)
            pieces.append(str(pos // 5 + 1) + str(pos % 5 + 1))
        else:
            pieces.append(ch)
    return "".join(pieces)
def decode_polybius(text):
    """Decode a Polybius-square cipher: digit pairs map back to letters,
    anything else passes through. Warns on stdout when 'I' appears, since
    'J' is encoded identically and may have been lost."""
    square = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    decoded = ""
    pos = 0
    while pos < len(text):
        ch = text[pos]
        if ch.isnumeric():
            row, col = text[pos], text[pos + 1]
            decoded += square[(int(row) - 1) * 5 + int(col) - 1]
            pos += 2
        else:
            decoded += ch
            pos += 1
    if "I" in decoded:
        print("\'J \'may have been overwritten by \'I\', have a closer look human!\n" + decoded)
    return decoded
def encode_caesar(key, message):
    """Shift letters in *message* forward by *key* positions (mod 26),
    preserving case; non-letters pass through unchanged."""
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    shift = key % 26
    encoded = []
    for ch in message:
        upper = ch.upper()
        if upper not in alphabet:
            encoded.append(ch)
            continue
        rotated = alphabet[(alphabet.index(upper) + shift) % 26]
        encoded.append(rotated if ch.isupper() else rotated.lower())
    return "".join(encoded)
def decode_caesar(key, message):
    """Undo a Caesar shift of *key* applied by encode_caesar."""
    # Shifting forward by (26 - key mod 26) is the same as shifting back by key.
    return encode_caesar(26 - key % 26, message)
def encode_ragbaby(text, key, enc = 1):
    """Ragbaby cipher: build a keyed alphabet (key letters first, then the
    remaining letters A-Z) and shift each letter by its 1-based position
    within its word. The position counter resets on any non-letter.
    enc=+1 encodes, enc=-1 decodes."""
    # Deduplicate key characters (case-sensitively, as before), then uppercase.
    seen = []
    for ch in key:
        if ch not in seen:
            seen.append(ch)
    keyed = "".join(seen).upper()
    alp = keyed + "".join(ch for ch in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" if ch not in keyed)
    out = ""
    pos = 1
    for ch in text:
        upper = ch.upper()
        if upper in alp:
            shifted = alp[(alp.index(upper) + pos * enc) % 26]
            out += shifted if ch.isupper() else shifted.lower()
            pos += 1
        else:
            out += ch
            pos = 1
    return out
def decode_ragbaby(text, key):
    """Invert a ragbaby encoding produced with the same *key*."""
    # Decoding is just encoding with the shift direction reversed.
    return encode_ragbaby(text, key, enc=-1)
def encode_tongues(text):
    """Self-inverse substitution: vowels map within a 6-vowel set and
    consonants within a 20-consonant set, each rotated by half the set's
    length — so applying the function twice restores the input."""
    vowels = "AIYEOU"
    consonants = "BKXZNHDCWGPVJQTSRLMF"
    out = []
    for ch in text:
        upper = ch.upper()
        if upper in vowels:
            group = vowels
        elif upper in consonants:
            group = consonants
        else:
            out.append(ch)
            continue
        sub = group[(group.index(upper) + len(group) // 2) % len(group)]
        out.append(sub if ch.isupper() else sub.lower())
    return "".join(out)
def encrypt_index_difference(text):
    """Encrypt *text* over a fixed 77-character alphabet (REGION).

    Three steps:
      1. Swap the case of every second character (letters only).
      2. Replace each character after the first with the REGION character at
         the index difference of the previous and current characters.
      3. Replace the first character with its mirrored REGION index.

    Raises:
        ValueError: if any character of *text* is not in REGION.

    Fix: validation used the comprehension variable after the comprehension,
    which in Python 3 raised NameError instead of the intended ValueError.
    """
    REGION = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;-?! \'()$%&"'
    if not text:
        return text
    # Validate input, reporting the first offending character.
    for c in text:
        if c not in REGION:
            raise ValueError(f'char "{c}" not in REGION')
    # Step 1: swap case of every 2nd char (odd indices), letters only.
    chars = list(text)
    for i in range(1, len(chars), 2):
        if chars[i].upper() in REGION[:26]:
            chars[i] = chars[i].swapcase()
    text = "".join(chars)
    # Step 2: each char becomes REGION[idx(prev) - idx(curr)]
    # (negative differences wrap via Python's negative indexing).
    r = text[0]
    for i in range(1, len(text)):
        r += REGION[REGION.index(text[i - 1]) - REGION.index(text[i])]
    # Step 3: replace the first char with its mirrored index in REGION.
    return REGION[-1 * REGION.index(r[0]) - 1] + r[1:]
def decrypt_index_difference(text):
    """Invert encrypt_index_difference over the 77-character REGION alphabet.

    Undoes the three encryption steps in reverse: unmirror the first char,
    run the cumulative index-difference decode, then swap the case of every
    second character back.

    Raises:
        ValueError: if any character of *text* is not in REGION.

    Fix: validation used the comprehension variable after the comprehension,
    which in Python 3 raised NameError instead of the intended ValueError.
    """
    REGION = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;-?! \'()$%&"'
    if not text:
        return text
    # Validate input, reporting the first offending character.
    for c in text:
        if c not in REGION:
            raise ValueError(f'char "{c}" not in REGION')
    # Undo step 3: unmirror the first character's index.
    chars = [REGION[-1 * REGION.index(text[0]) - 1]] + list(text[1:])
    # Undo step 2: each plain char is REGION[idx(prev_plain) - idx(cipher)],
    # using the already-decoded previous character.
    for i in range(1, len(chars)):
        chars[i] = REGION[REGION.index(chars[i - 1]) - REGION.index(chars[i])]
    # Undo step 1: swap case of every 2nd char (odd indices), letters only.
    for i in range(1, len(chars), 2):
        if chars[i].upper() in REGION[:26]:
            chars[i] = chars[i].swapcase()
    return "".join(chars)
# === TRANSPOSITION CIPHERS ===
def column_transpose(string):
    """Transpose *string* as a square character grid.

    The string is left-justified (padded with trailing spaces) up to the
    next perfect-square length, laid out row by row in a square of side
    ceil(sqrt(len(string))), and read back column by column.
    """
    side = ceil(sqrt(len(string)))
    padded = string.ljust(side * side)
    return "".join(
        padded[row * side + col]
        for col in range(side)
        for row in range(side)
    )
def encode_IRC(n, string):
    """Encrypt *string* with n rounds of rotations and prefix the key.

    Each round: rotate all non-space characters right by n (spaces are
    re-inserted at their recorded indices afterwards), then rotate each
    space-delimited word right by n.  Finally str(n) plus a space is
    prepended so decode_IRC can recover the key.
    """
    # Record where the spaces live so they can be re-inserted after the
    # space-free rotation below.
    # NOTE(review): the search starts at i + 1 with i == 0, so a space at
    # index 0 would be missed -- confirm inputs never begin with a space.
    space_ins = []
    i = 0
    while string.find(" ", i + 1) != -1:
        i = string.find(" ", i + 1)
        space_ins.append(i)
    for _ in range(n):
        # Rotate the space-stripped text right by n.
        # NOTE(review): this slicing assumes 0 <= n <= the stripped
        # length; larger shifts are not reduced modulo the length here.
        string = string.replace(" ", "")
        string = string[-n:] + string[:-n]
        string = list(string)
        for i in space_ins:
            string.insert(i, " ")
        # Rotate every word right by n (reduced modulo the word length).
        string = "".join(string).split(" ")
        for i, word in enumerate(string):
            if len(word) != 0:
                sn = n % len(word)
                string[i] = word[-sn:] + word[:-sn]
        string = " ".join(string)
    # Prepend the key so the decoder knows n.
    return str(n) + " " + string
def decode_IRC(string):
    """Invert encode_IRC: strip the leading key n, then undo n rounds of
    the word and whole-string rotations by rotating left instead of
    right, in the reverse order of the encoder's steps.
    """
    # The key n is everything before the first space.
    n = int(string[:string.index(" ")])
    string = string[string.index(" ") + 1:]
    # Record space positions in the encoded text; the encoder keeps
    # spaces at fixed indices, so the same positions apply every round.
    i = 0
    space_ins = []
    while string.find(" ", i + 1) != -1:
        i = string.find(" ", i + 1)
        space_ins.append(i)
    for _ in range(n):
        # Rotate every word left by n (mod word length).
        string = string.split(" ")
        for i, word in enumerate(string):
            if len(word) != 0:
                sn = n % len(word)
                string[i] = word[sn:] + word[:sn]
        string = " ".join(string)
        # Rotate the space-stripped text left by n, then restore spaces.
        string = string.replace(" ", "")
        string = string[n:] + string[:n]
        string = list(string)
        for i in space_ins:
            string.insert(i, " ")
        string = ''.join(string)
    return string
def encode_cut_deck(text):
    """Split *text* like cutting a deck: every even-indexed character,
    followed by every odd-indexed character."""
    return text[::2] + text[1::2]
def decode_cut_deck(text):
    """Invert the cut-deck encoding by re-interleaving the two halves.

    The first ceil(len/2) characters were the even-indexed chars and the
    remainder the odd-indexed ones; for odd-length input the first half
    is one character longer than the second.
    """
    half = (len(text) + 1) // 2
    evens, odds = text[:half], text[half:]
    pieces = []
    for idx, ch in enumerate(evens):
        pieces.append(ch)
        if idx < len(odds):
            pieces.append(odds[idx])
    return "".join(pieces)
| 3.859375 | 4 |
ppcls/loss/googlenetloss.py | PaddlePaddle/PaddleImgClass | 7 | 12758537 | <filename>ppcls/loss/googlenetloss.py<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class GoogLeNetLoss(nn.Layer):
    """
    Cross entropy loss used after googlenet
    reference paper: [https://arxiv.org/pdf/1409.4842v1.pdf](Going Deeper with Convolutions)
    """

    def __init__(self, epsilon=None):
        super().__init__()
        # GoogLeNet does not support label smoothing, so epsilon must be
        # unset or outside the usual smoothing range (0, 1).
        assert (epsilon is None or epsilon <= 0 or
                epsilon >= 1), "googlenet is not support label_smooth"

    def forward(self, inputs, label):
        # The three GoogLeNet heads: the main output plus two auxiliary
        # classifiers; exactly three are expected.
        head0, head1, head2 = inputs
        logits = []
        for head in (head0, head1, head2):
            # Unwrap dict-style outputs down to the raw logits tensor.
            logits.append(head["logits"] if isinstance(head, dict) else head)
        losses = [
            F.cross_entropy(logit, label=label, soft_label=False)
            for logit in logits
        ]
        # The auxiliary losses are weighted by 0.3, as in the paper.
        loss = losses[0] + 0.3 * losses[1] + 0.3 * losses[2]
        loss = loss.mean()
        return {"GooleNetLoss": loss}
| 2.734375 | 3 |
vla.py | sushmitajaiswal/PythonPrograms | 0 | 12758538 | <gh_stars>0
def sum(*n):
    """Print the total of all positional arguments.

    NOTE: intentionally shadows the built-in sum(); the demonstration
    calls below depend on this name.
    """
    total = 0
    for addend in n:
        total = total + addend
    print("the sum=", total)
# Demonstrate the variadic function with zero, one, two and four args.
sum()
sum(10)
sum(10,20)
sum(10,20,30,40)
libs/sdc_etl_libs/api_helpers/apis/Ultipro/Ultipro.py | darknegma/docker-airflow | 0 | 12758539 | <reponame>darknegma/docker-airflow<filename>libs/sdc_etl_libs/api_helpers/apis/Ultipro/Ultipro.py
import logging
import backoff
import requests
from ast import literal_eval
from zeep import Client as Zeep
from zeep import xsd
from sdc_etl_libs.api_helpers.API import API
logging.basicConfig(level=logging.INFO)
class Ultipro(API):
    """Helper around the Ultipro REST and SOAP APIs.

    Credentials are fetched from AWS Secrets Manager under the "ultipro"
    secret.  Subclasses are expected to set ``base_url`` and implement
    endpoint processing.
    """

    def __init__(self):
        self.credentials = self.get_credentials("aws_secrets", "ultipro")
        self.base_url = ""

    def process_endpoint(self):
        # Intentionally a no-op in the base class; concrete Ultipro
        # services override this.
        pass

    def get_daily_filter(self):
        raise Exception("Do not use base class get_daily_filter function.")

    def rest_authenticate(self, username_key_, password_key_):
        """
        Authentication for Ultipro REST API.
        :param username_key_: Secrets dict key for username
        :param password_key_: Secrets dict key for password
        :return: None (sets ``self.auth`` to a (username, password) tuple)
        """
        # BUG FIX: the original round-tripped the credentials through an
        # f-string and ast.literal_eval to build this tuple, which breaks
        # on quotes/backslashes in a password and is needlessly indirect.
        self.auth = (self.credentials[username_key_],
                     self.credentials[password_key_])

    @staticmethod
    def soap_backoff_handler(details):
        """
        Message formatting function for Backoff messages.
        :return: Message for logger.
        """
        logging.warning("Backing off {wait:0.1f} seconds after {tries} tries "
                        "calling function {target}".format(**details))

    @backoff.on_exception(backoff.expo, requests.exceptions.HTTPError,
                          max_tries=8, on_backoff=soap_backoff_handler)
    def soap_authenticate(self):
        """
        Authentication for Ultipro SOAP connection.
        :return: None (sets ``self.token`` and ``self.session_header``)
        """
        login_header = {
            'UserName': self.credentials["soap_username"],
            'Password': self.credentials["soap_password"],
            'ClientAccessKey': self.credentials["api_key"],
            'UserAccessKey': self.credentials["soap_user_access_key"]
        }

        zeep_client = Zeep(f"{self.base_url}LoginService")
        result = zeep_client.service.Authenticate(_soapheaders=login_header)
        self.token = result['Token']

        # Create xsd ComplexType header -
        # http://docs.python-zeep.org/en/master/headers.html
        header = xsd.ComplexType([
            xsd.Element(
                '{http://www.ultimatesoftware.com/foundation/authentication'
                '/ultiprotoken}UltiProToken',
                xsd.String()),
            xsd.Element(
                '{http://www.ultimatesoftware.com/foundation/authentication'
                '/clientaccesskey}ClientAccessKey',
                xsd.String()),
        ])

        # Add authenticated header to client object
        self.session_header = header(UltiProToken=self.token,
                                     ClientAccessKey=self.credentials["api_key"])
backend/posts/models.py | hvitis/geodjango-rest-vue-boilerplate | 5 | 12758540 | <reponame>hvitis/geodjango-rest-vue-boilerplate<gh_stars>1-10
from django.db import models
class Post(models.Model):
    """A blog post with a short subject line and free-form body text."""
    # Headline, capped at 200 characters.
    subject = models.CharField(max_length=200)
    # Unbounded post content.
    body = models.TextField()
| 1.773438 | 2 |
models/Baseline Models/o_d_adjacency.py | Chethan-Babu-stack/Machine-Learning-for-Evolving-graph-data | 0 | 12758541 | <reponame>Chethan-Babu-stack/Machine-Learning-for-Evolving-graph-data<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 1 23:31:07 2021
@author: Chethan
"""
# Importing libraries
import numpy as np, pandas as pd
# import matplotlib.pyplot as plt
# Read only the first 1000 origin-destination columns of the dataset.
range1 = [i for i in range(1,1001)]
ds = pd.read_csv(r"C:/Users/Chethan/Downloads/preprocessed_dataset_no_commonOD_no_constants_all_stationary.csv", usecols = range1)
# Column headers.  NOTE(review): each header is assumed to be an
# "OOOO_DDDD" pair (4-char origin, underscore, 4-char destination),
# given the [5:] / [0:4] slicing below -- confirm against the dataset.
o_d_list = list(ds)
rows = cols = len(o_d_list)
# Adjacency matrix over the O-D column names.
od_adj = np.zeros(shape=(rows, cols), dtype=np.uint8)
# Mark (i, j) when the destination part of column i equals the origin
# part of column j.
for i in range(0, rows):
    o_d_row = str(o_d_list[i])
    for j in range(0, cols):
        o_d_col = str(o_d_list[j])
        if o_d_row[5:] == o_d_col[0:4]:
            od_adj[i,j] = 1
# Persist the adjacency matrix without headers or index.
res_df = pd.DataFrame(data = od_adj)
res_df.to_csv(r"C:\Users\Chethan\Desktop\TUD\TUD Sem 3\Research Project\DataSet\Preprocessed\od_adj.csv", sep=",",header=False, index = False)
| 2.3125 | 2 |
rest-server/app/build.py | adityaguru149/csv-grpc-json | 1 | 12758542 | <gh_stars>1-10
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .routes import router
def get_application() -> FastAPI:
    """Create and configure the FastAPI application for the REST server.

    Installs a fully-open CORS policy and mounts the meter router under
    the /meter prefix.
    """
    application = FastAPI(title="REST server")

    application.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    application.include_router(
        router,
        prefix="/meter",
        tags=["meter"],
        responses={404: {"description": "Not found"}},
    )

    return application
| 2.328125 | 2 |
getHourlyHistory.py | andrewvh4/OpenWeather_Datalogger | 0 | 12758543 | from OpenWeather import *
from time import *
from datetime import *
'''
pyinstaller getHourlyHistory.py --onefile
'''
# Seconds in a day, used to step backwards through past days.
sec_day = 86400
OpenWeather.init()
# Ensure the history log exists; create it empty on first run.
# NOTE(review): bare except also hides unrelated errors (e.g.
# permissions) -- confirm that is acceptable here.
try:
    open("HistoryLog.txt", 'r')
except:
    print("Creating new history file")
    with open("HistoryLog.txt", 'w') as f:
        pass
# Dates (YYYY-MM-DD, one per line) that have already been fetched.
dates = []
with open("HistoryLog.txt", 'r') as f:
    dates=[x.replace('\n', '') for x in f.readlines()]
with open("HistoryLog.txt", 'a') as dateLog:
    # Locations.txt: one "name,lat,lon" entry per line.
    with open("Locations.txt", 'r') as locationFile:
        locations = [x.replace('\n', '').split(',') for x in locationFile.readlines()]
        for location in locations:
            print("Locations:"+location[0])
        print('\n')
        # Walk the previous 6 days, oldest first.
        for i in range(6,0,-1):
            timestamp = int(datetime.utcnow().timestamp())-i*sec_day
            # Skip days that were already logged on a previous run.
            if(datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d') not in dates):
                print('Date:'+ datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d'))
                dateLog.write('\n'+datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d'))
                # Fetch and store history for every location; the short
                # sleep throttles requests to the OpenWeather API.
                for location in locations:
                    OpenWeather.storeData(OpenWeather.Historical(float(location[1]), float(location[2]), timestamp))
                    sleep(.5)
| 2.984375 | 3 |
config/__init__.py | rji-futures-lab/django-rmp-data | 0 | 12758544 | <gh_stars>0
"""
Django configurations for the project.
These configurations include:
* settings: Project-wide settings, which may be customized per environment.
* urls: Routes URLs to views (i.e., Python functions).
* wsgi: The default Web Server Gateway Interface.
""" | 1.453125 | 1 |
image/setup.py | 00schen/asha | 1 | 12758545 | <filename>image/setup.py
from setuptools import setup
# Minimal distribution metadata for the "rl" package.
setup(
    name='rl',
    version='0.1.0',
    packages=['rl'],
)
src/rez/data/tests/packages/py_packages/late_binding/1.0/package.py | alexey-pelykh/rez | 0 | 12758546 | name = 'late_binding'
version = "1.0"

# NOTE(review): this is a rez package definition; `late` and the `env`
# object used in commands() are injected by rez when the file is
# evaluated, not defined here -- confirm against the rez docs.
@late()
def tools():
    # Evaluated lazily ("late binding") when the attribute is accessed.
    return ["util"]

def commands():
    # Expose the package's bin directory on PATH ("{root}" expands to
    # the installed package root).
    env.PATH.append("{root}/bin")
djangae/db/migrations/utils.py | farridav/djangae | 0 | 12758547 | import logging
import random
import time
def do_with_retry(func, *args, **kwargs):
""" Tries a function 3 times using exponential backoff
according to Google API specs. Optional kwargs:
`_attempts` - override the number of attempts before giving up.
`_catch` - tuple of exception types used in `except types as e`.
"""
MINIMUM_WAIT = 0.5
_catch = kwargs.pop("_catch", (Exception,))
_attempts = kwargs.pop('_attempts', 3)
for n in xrange(_attempts):
try:
return func(*args, **kwargs)
except _catch, e:
logging.warning("Transient error ({}), retrying...".format(e))
# back off by factor of two plus a random number of milliseconds
# to prevent deadlocks (according to API docs..)
time.sleep(MINIMUM_WAIT + (2 ** n + float(random.randint(0, 1000)) / 1000))
else:
raise
def clone_entity(entity, new_key):
    """ Return a clone of the given entity with the key changed to the given key. """
    # TODO: can this be better or less weird?
    # Entity doesn't implement copy(), so round-trip through the
    # protocol-buffer representation to get a deep copy.
    entity_as_protobuff = entity.ToPb()
    new_entity = entity.__class__.FromPb(entity_as_protobuff)
    # __key is a protected attribute, so we have to set _Entity__key.
    # Note: outside a class body `new_entity.__key` is NOT name-mangled,
    # so the first assignment sets a literal `__key` attribute and the
    # second sets the mangled name the Entity class uses internally.
    new_entity.__key = new_key
    new_entity._Entity__key = new_key
    return new_entity
| 2.9375 | 3 |
cargame.py | aman9080/pygame-car-project | 0 | 12758548 | <gh_stars>0
# http://richard.cgpublisher.com/product/pub.84/prod.11
# INTIALISATION
import pygame, math, sys
from pygame.locals import *
# --- tunable constants -------------------------------------------------
TURN_SPEED = 6           # degrees turned per key event
ACCELERATION = 3         # NOTE(review): currently unused
MAX_FORWARD_SPEED = 0    # NOTE(review): 0 means K_UP never adds speed -- confirm intended
MAX_REVERSE_SPEED = 5
BG = (0, 75, 100)        # background fill colour
# Screen bounds.  BUG FIX: the original used MAX_X/MAX_Y in the clamping
# code below without ever defining them, raising NameError on the first
# frame; define them to match the window size.
MAX_X = 1200
MAX_Y = 600

# initialize the screen with size (MAX_X, MAX_Y)
screen = pygame.display.set_mode((MAX_X, MAX_Y))
car = pygame.image.load('car.png')

# initialize the sound mixer
pygame.mixer.init()
horn = pygame.mixer.Sound('car horror horn.mp3')

clock = pygame.time.Clock() # load clock
k_up = k_down = k_left = k_right = 0 # init key values
speed = direction = 0 # start speed & direction
position = (100, 100) # start position
play = True
while play:
    # USER INPUT
    clock.tick(30)  # cap the main loop at 30 FPS
    # get events from the user
    for event in pygame.event.get():
        # not a key event
        if not hasattr(event, 'key'):
            continue
        # check if presses a key or left it
        down = event.type == KEYDOWN
        up = event.type == KEYUP # key down or up?
        # key events: http://pygame.org/docs/ref/key.html
        if event.key == K_RIGHT:
            k_right = up * TURN_SPEED
        elif event.key == K_LEFT:
            k_left = up * TURN_SPEED
        elif event.key == K_UP:
            k_up = up * MAX_FORWARD_SPEED
        elif event.key == K_DOWN:
            k_down = up * 0
        elif event.key == K_RETURN:
            horn.play() # TODO honk twice if you feel nice
        elif event.key == K_ESCAPE:
            play = False
    screen.fill(BG)

    # SIMULATION
    # .. new speed and direction based on acceleration and turn
    speed += (k_up + k_down)
    # Clamp the speed.  NOTE(review): with MAX_FORWARD_SPEED == 0 and the
    # second clamp raising any smaller value to MAX_REVERSE_SPEED, the
    # car effectively always moves at speed 5 -- confirm this is the
    # intended behaviour.
    if speed > MAX_FORWARD_SPEED:
        speed = MAX_FORWARD_SPEED
    if speed < MAX_REVERSE_SPEED:
        speed = MAX_REVERSE_SPEED
    direction += (k_right - k_left) # TODO is this the right direction?
    # .. new position based on current position, speed and direction
    x, y = position
    rad = direction * math.pi / 180
    x += speed * math.sin(rad)
    y += speed * math.cos(rad)
    # make sure the car doesn't exit the screen
    if y < 0:
        y = 0 # TODO is there another way to treat this?
    elif y > MAX_Y:
        y = MAX_Y
    if x < 0:
        x = 0
    elif x > MAX_X:
        x = MAX_X
    position = (x, y)

    # RENDERING
    # .. rotate the car image for direction
    rotated = pygame.transform.rotate(car, direction)
    # .. position the car on screen
    rect = rotated.get_rect()
    rect.center = position
    print(position)  # debug: log the car position each frame
    # .. render the car to screen
    screen.blit(rotated, rect)
    pygame.display.flip()
sys.exit(0) # quit the game
| 3.25 | 3 |
tests/test_core_config.py | seznam/shelter | 7 | 12758549 |
import importlib
import pytest
import tornado.web
from shelter.core.cmdlineparser import ArgumentParser
from shelter.core.config import Config
from shelter.core.context import Context
import tests.test_core_app
class ContextTest(Context):
    """Context subclass used to verify that a settings-supplied custom
    context class is honoured (see test_config_context_class_user)."""
    pass
def test_config_cls():
    """Config stores its constructor arguments and has a useful repr."""
    cfg = Config(1, 2)
    assert cfg.settings == 1
    assert cfg.args_parser == 2
    assert "<shelter.core.config.Config: 0x" in repr(cfg)
def test_config_context_class_default():
    """Without a custom context class in settings, the base Context wins."""
    settings = importlib.import_module('tests.settings1')
    cfg = Config(settings, ArgumentParser())
    assert cfg.context_class is Context
def test_config_context_class_user():
    """A context class named in settings overrides the default Context."""
    settings = importlib.import_module('tests.settings2')
    cfg = Config(settings, ArgumentParser())
    assert cfg.context_class is ContextTest
    assert cfg.context_class is not Context
def test_config_interfaces():
    """All four interfaces declared in tests.settings1 parse correctly.

    Rewritten table-driven: the original repeated the same eight asserts
    four times by hand.
    """
    config = Config(
        importlib.import_module('tests.settings1'),
        ArgumentParser()
    )

    interfaces = sorted(config.interfaces, key=lambda x: x.name)

    assert len(interfaces) == 4

    # (name, host, port, unix_socket, app_cls, processes, start_timeout,
    #  number of urls) for each interface, sorted by name.
    expected = [
        ('fastrpc', '192.168.1.0', 4445, None,
         tornado.web.Application, 1, 5.0, 0),
        ('http', '', 4443, None,
         tornado.web.Application, 12, 30.0, 2),
        ('rest', '', 4447, None,
         tests.test_core_app.ApplicationTest, 2, 5.0, 0),
        ('unix', None, None, '/tmp/tornado.socket',
         tests.test_core_app.ApplicationTest, 6, 5.0, 3),
    ]
    for interface, (name, host, port, unix_socket, app_cls,
                    processes, start_timeout, url_count) in zip(
                        interfaces, expected):
        assert interface.name == name
        assert interface.host == host
        assert interface.port == port
        assert interface.unix_socket == unix_socket
        assert interface.app_cls is app_cls
        assert interface.processes == processes
        assert interface.start_timeout == start_timeout
        assert len(interface.urls) == url_count
def test_config_interfaces_both_tcp_and_unix():
    """An interface may listen on a TCP port and a UNIX socket at once."""
    settings = importlib.import_module('tests.settings5')
    cfg = Config(settings, ArgumentParser())

    iface = cfg.interfaces[0]
    assert iface.name == 'http_both_tcp_and_unix'
    assert iface.host == ''
    assert iface.port == 4443
    assert iface.unix_socket == '/tmp/tornado.socket'
def test_config_interface_fail_when_neither_tcp_nor_unix():
    """An interface without TCP or UNIX socket settings raises ValueError."""
    config = Config(
        importlib.import_module('tests.settings6'),
        ArgumentParser()
    )
    with pytest.raises(ValueError) as e:
        _ = config.interfaces
    # Check the message on the exception itself: str() of the pytest
    # ExceptionInfo wrapper is not guaranteed to contain the message.
    assert "Interface MUST listen either on TCP or UNIX socket" in str(e.value)
| 2.1875 | 2 |
ntcl_build_tools/build_info.py | cheshyre/ntcl-build | 0 | 12758550 | <filename>ntcl_build_tools/build_info.py<gh_stars>0
from .config import Config
from .debug_writer import debug_print
class BuildInfo(object):
    """Build description for one library: its modules, applications,
    plugins, API modules, test mode and per-flag module lists.
    """

    def __init__(self, name=None):
        # A list-valued name collapses to its first element (config
        # files may supply either form).
        if type(name) is list:
            self.name = name[0]
        else:
            self.name = name
        self.modules = []
        self.applications = []
        self.uses = []
        self.plugins = []
        self.base_plugins = []
        self.api = []
        self.tests = "none"
        self.flags = {}
        self.dependencies = {}

    def add_module(self, module):
        """Append one module name or a list of module names."""
        if type(module) is list:
            self.modules.extend(module)
        else:
            self.modules.append(module)

    def add_application(self, application):
        """Append one application name or a list of application names."""
        if type(application) is list:
            self.applications.extend(application)
        else:
            self.applications.append(application)

    def add_uses(self, uses):
        """Append one used-library name or a list of them."""
        if type(uses) is list:
            self.uses.extend(uses)
        else:
            self.uses.append(uses)

    def add_tests(self, tests):
        """Set the test mode ("none", "serial" or "distributed")."""
        if type(tests) is list:
            self.tests = tests[0]
        else:
            # BUG FIX: the original executed `this.tests.tests = tests`,
            # which raised AttributeError because self.tests is a string.
            self.tests = tests

    def add_flags(self, flags):
        """Register flag -> module-list mappings.

        Keys of the form "<flag>:dependencies" populate
        self.dependencies under <flag> instead of self.flags; scalar
        values are wrapped into single-element lists.
        """
        for flag, values in flags.items():
            parts = flag.split(':')
            if len(parts) == 2 and parts[1] == 'dependencies':
                if type(values) is list:
                    self.dependencies[parts[0]] = values
                else:
                    self.dependencies[parts[0]] = [values]
            else:
                if type(values) is list:
                    self.flags[flag] = values
                else:
                    self.flags[flag] = [values]

    def add_plugins(self, plugins):
        """Append one plugin name or a list of plugin names."""
        if type(plugins) is list:
            self.plugins.extend(plugins)
        else:
            self.plugins.append(plugins)

    def add_base_plugins(self, base_plugins):
        """Append one base-plugin name or a list of base-plugin names."""
        if type(base_plugins) is list:
            self.base_plugins.extend(base_plugins)
        else:
            self.base_plugins.append(base_plugins)

    def add_api(self, api):
        """Append one API module name or a list of API module names."""
        if type(api) is list:
            self.api.extend(api)
        else:
            self.api.append(api)

    def has_flags(self):
        return len(self.flags) > 0

    def has_plugins(self):
        return len(self.plugins) > 0

    def has_base_plugins(self):
        return len(self.base_plugins) > 0

    def has_api(self):
        return len(self.api) > 0

    def has_applications(self):
        return len(self.applications) > 0

    def flag_in_plugins(self, flag):
        """True if any module registered under *flag* is a plugin.
        Raises KeyError for unknown flags (as the original did)."""
        return any(module in self.plugins for module in self.flags[flag])

    def flag_in_base_plugins(self, flag):
        """True if any module registered under *flag* is a base plugin."""
        return any(module in self.base_plugins for module in self.flags[flag])

    def has_serial_tests(self):
        return self.tests == "serial"

    def has_distributed_tests(self):
        return self.tests == "distributed"

    def has_tests(self):
        return self.has_distributed_tests() or self.has_serial_tests()

    def module_in_flag(self, flag, module):
        """True if *module* is registered under *flag*."""
        return module in self.flags[flag]

    def module_has_no_flag(self, module):
        """True if *module* appears under no flag at all."""
        return all(module not in members for members in self.flags.values())

    @classmethod
    def from_file(cls, filename):
        """Build a BuildInfo from a config file.

        Known keys map to the corresponding add_* methods; every
        remaining key is treated as a flag definition.
        """
        d = Config.from_file(filename)
        debug_print(d)
        if 'library_name' in d.keys():
            info = cls(d['library_name'])
        else:
            info = cls()
        if 'modules' in d.keys():
            info.add_module(d['modules'])
        if 'applications' in d.keys():
            info.add_application(d['applications'])
        if 'uses' in d.keys():
            info.add_uses(d['uses'])
        if 'tests' in d.keys():
            info.add_tests(d['tests'])
        if 'plugins' in d.keys():
            info.add_plugins(d['plugins'])
        if 'base_plugins' in d.keys():
            info.add_base_plugins(d['base_plugins'])
        if 'api' in d.keys():
            info.add_api(d['api'])
        # Everything left over is a flag (or "<flag>:dependencies") entry.
        for key in ['library_name', 'modules', 'applications', 'uses',
                    'tests', 'base_plugins', 'plugins', 'api']:
            if key in d.keys():
                del d[key]
        info.add_flags(d)
        return info
| 2.3125 | 2 |