# NOTE(review): the two lines below are dataset-export residue (a markdown
# table header), not Python source; commented out so the module parses.
# code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M
# |---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 08 15:09:49 2016
@author: tih
"""
import numpy as np
import os
import scipy.interpolate
import gdal
from openpyxl import load_workbook
import osr
from datetime import datetime, timedelta
import pandas as pd
import shutil
import glob
from netCDF4 import Dataset
import warnings
import SEBAL
def main():
####################################################################################################################
############################################# CREATE INPUT FOR SEBAL RUN ###########################################
####################################################################################################################
####################################################################################################################
##################################################### PreHANTS ####################################################
####################################################################################################################
# PreHANTS
# Part 1: Define input by user
# Part 2: Set parameters and output folder
# Part 3: RUN SEBAL
# Part 4: HANTS
# Part 5: post HANTS
# Part 6: Write output
####################################################################################################################
################################################# PreHANTS part 1 ##################################################
####################################################################################################################
VegetationExcel =r"X:\Excel_in_test_4\Vegetation height model.xlsx" # This excel defines the p and c factor and vegetation height.
####################################################################################################################
################################################# PreHANTS part 2 ##################################################
####################################################################################################################
# Open Excel workbook used for Vegetation c and p factor conversions
wb_veg = load_workbook(VegetationExcel, data_only=True)
ws_veg = wb_veg['General_Input']
# Input for preSEBAL.py
start_date = "%s" %str(ws_veg['B2'].value)
end_date = "%s" %str(ws_veg['B3'].value)
inputExcel= r"%s" %str(ws_veg['B4'].value) # The excel with all the SEBAL input data
LU_data_FileName = r"%s" %str(ws_veg['B5'].value) # Path to Land Use map
output_folder = r"%s" %str(ws_veg['B7'].value)
# optional paramater
DSSF_Folder= r"%s" %str(ws_veg['B6'].value)
######################## Load Excels ##########################################
# Open Excel workbook for SEBAL inputs
wb = load_workbook(inputExcel)
# Get length of EXCEL sheet
ws = wb['General_Input']
ws2 = wb['VIIRS_PROBAV_Input']
endExcel=int(ws.max_row)
# Create Dict
SEBAL_RUNS = dict()
for number in range(2,endExcel+1):
input_folder_SEBAL = str(ws['B%d' % number].value)
output_folder_SEBAL = str(ws['C%d' % number].value)
Image_Type = int(ws['D%d' % number].value)
PROBA_V_name = str(ws2['D%d' % number].value)
VIIRS_name = str(ws2['B%d' % number].value)
SEBAL_RUNS[number] = {'input_folder': input_folder_SEBAL, 'output_folder': output_folder_SEBAL, 'image_type': Image_Type,'PROBA_V_name': PROBA_V_name,'VIIRS_name': VIIRS_name}
Kind_Of_Runs_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
Kind_Of_Runs_Dict.setdefault(v['image_type'], []).append(k)
######################## Create output folders ##########################################
output_folder_PreSEBAL_SEBAL = os.path.join(output_folder,'PreSEBAL_SEBAL_out')
input_folder_HANTS = os.path.join(output_folder,'HANTS_in')
output_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_out')
temp_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_temp')
temp_folder_PreSEBAL_LST = os.path.join(temp_folder_PreSEBAL,'LST')
NDVI_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'NDVI')
Albedo_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Albedo')
WaterMask_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Water_Mask')
LAI_outfolder = os.path.join(output_folder_PreSEBAL,'LAI')
ALBEDO_outfolder_end = os.path.join(output_folder_PreSEBAL,'ALBEDO')
NDVI_outfolder_end = os.path.join(output_folder_PreSEBAL,'NDVI')
WaterMask_outfolder_end = os.path.join(output_folder_PreSEBAL,'Water_Mask')
TRANS_outfolder = os.path.join(output_folder_PreSEBAL,'Transmissivity')
Surface_Temperature_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Surface_Temperature')
output_folder_HANTS_end_sharp = os.path.join(output_folder_PreSEBAL, 'LST_Sharpened')
output_folder_HANTS_end_Veg = os.path.join(output_folder_PreSEBAL, 'Vegetation_Height')
output_folder_p_factor = os.path.join(output_folder_PreSEBAL, 'p_factor')
output_folder_LUE = os.path.join(output_folder_PreSEBAL, 'LUE')
if not os.path.exists(output_folder_PreSEBAL_SEBAL):
os.makedirs(output_folder_PreSEBAL_SEBAL)
if not os.path.exists(output_folder_PreSEBAL):
os.mkdir(output_folder_PreSEBAL)
if not os.path.exists(temp_folder_PreSEBAL):
os.mkdir(temp_folder_PreSEBAL)
if not os.path.exists(NDVI_outfolder):
os.makedirs(NDVI_outfolder)
if not os.path.exists(Albedo_outfolder):
os.makedirs(Albedo_outfolder)
if not os.path.exists(WaterMask_outfolder):
os.makedirs(WaterMask_outfolder)
if not os.path.exists(LAI_outfolder):
os.makedirs(LAI_outfolder)
if not os.path.exists(ALBEDO_outfolder_end):
os.makedirs(ALBEDO_outfolder_end)
if not os.path.exists(NDVI_outfolder_end):
os.makedirs(NDVI_outfolder_end)
if not os.path.exists(WaterMask_outfolder_end):
os.makedirs(WaterMask_outfolder_end)
if not os.path.exists(temp_folder_PreSEBAL_LST):
os.makedirs(temp_folder_PreSEBAL_LST)
if not os.path.exists(Surface_Temperature_outfolder):
os.makedirs(Surface_Temperature_outfolder)
if not os.path.exists(TRANS_outfolder):
os.makedirs(TRANS_outfolder)
if not os.path.exists(output_folder_HANTS_end_sharp):
os.mkdir(output_folder_HANTS_end_sharp)
if not os.path.exists(output_folder_HANTS_end_Veg):
os.mkdir(output_folder_HANTS_end_Veg)
if not os.path.exists(output_folder_p_factor):
os.mkdir(output_folder_p_factor)
if not os.path.exists(output_folder_LUE):
os.mkdir(output_folder_LUE)
# Do not show warnings
warnings.filterwarnings('ignore')
####################################################################################################################
################################################### RUN SEBAL part 3 ###############################################
####################################################################################################################
############################## Define General info ############################
for number in Kind_Of_Runs_Dict[2]: # Number defines the column of the inputExcel
print number
if not (SEBAL_RUNS[number]['PROBA_V_name'] == 'None' and SEBAL_RUNS[number]['VIIRS_name'] == 'None'):
Rp = 0.91 # Path radiance in the 10.4-12.5 µm band (W/m2/sr/µm)
tau_sky = 0.866 # Narrow band transmissivity of air, range: [10.4-12.5 µm]
surf_temp_offset = 3 # Surface temperature offset for water
######################## Open General info from SEBAL Excel ###################
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the input and output folder, and Image type from the excel file
input_folder = str(ws['B%d' % number].value)
Image_Type = int(2) # Type of Image (1=Landsat & 2 = VIIRS & GLOBA-V)
# Extract the Path to the DEM map from the excel file
DEM_fileName = '%s' %str(ws['E%d' % number].value) #'DEM_HydroShed_m'
# Open DEM and create Latitude and longitude files
lat,lon,lat_fileName,lon_fileName=SEBAL.DEM_lat_lon(DEM_fileName, temp_folder_PreSEBAL)
######################## Extract general data for Landsat ##########################################
if Image_Type == 1:
# Open the Landsat_Input sheet
ws = wb['Landsat_Input']
# Extract Landsat name, number and amount of thermal bands from excel file
Name_Landsat_Image = str(ws['B%d' % number].value) # From glovis.usgs.gov
Landsat_nr = int(ws['C%d' % number].value) # Type of Landsat (LS) image used (LS5, LS7, or LS8)
Bands_thermal = int(ws['D%d' %number].value) # Number of LS bands to use to retrieve land surface
# Pixel size of the model
pixel_spacing=int(30)
# the path to the MTL file of landsat
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
# read out the general info out of the MTL file in Greenwich Time
year, DOY, hour, minutes, UTM_Zone, Sun_elevation = SEBAL.info_general_metadata(Landsat_meta_fileName) # call definition info_general_metadata
date=datetime.strptime('%s %s'%(year,DOY), '%Y %j')
month = date.month
day = date.day
# define the kind of sensor and resolution of the sensor
sensor1 = 'L%d' % Landsat_nr
sensor2 = 'L%d' % Landsat_nr
sensor3 = 'L%d' % Landsat_nr
res1 = '30m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
# Set the start parameter for determining transmissivity at 0
Determine_transmissivity = 0
######################## Extract general data for VIIRS-PROBAV ##########################################
if Image_Type == 2:
# Open the VIIRS_PROBAV_Input sheet
ws = wb['VIIRS_PROBAV_Input']
# Extract the name of the thermal and quality VIIRS image from the excel file
Name_VIIRS_Image_TB = '%s' %str(ws['B%d' % number].value)
# Extract the name to the PROBA-V image from the excel file
Name_PROBAV_Image = '%s' %str(ws['D%d' % number].value) # Must be a tiff file
# Pixel size of the model
pixel_spacing=int(100)
# UTM Zone of the end results
UTM_Zone = float(ws['G%d' % number].value)
if not Name_VIIRS_Image_TB == 'None':
#Get time from the VIIRS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE VIIRS NAME CORRECT example: npp_viirs_i05_20150701_124752_wgs84_fit.tif)
Total_Day_VIIRS = Name_VIIRS_Image_TB.split('_')[3]
Total_Time_VIIRS = Name_VIIRS_Image_TB.split('_')[4]
# Get the information out of the VIIRS name in GMT (Greenwich time)
year = int(Total_Day_VIIRS[0:4])
month = int(Total_Day_VIIRS[4:6])
day = int(Total_Day_VIIRS[6:8])
Startdate = '%d-%02d-%02d' % (year,month,day)
DOY=datetime.strptime(Startdate,'%Y-%m-%d').timetuple().tm_yday
hour = int(Total_Time_VIIRS[0:2])
minutes = int(Total_Time_VIIRS[2:4])
# If this is runned correctly, we can determine transmissivity
ws = wb['Meteo_Input']
Field_Radiation_24 = '%s' %str(ws['J%d' % number].value)
Field_Trans_24 = '%s' %str(ws['K%d' % number].value)
Determine_transmissivity = 1
# else use PROBA-V day but than no transmissivity can be determined for now
else:
# Get the day and time from the PROBA-V
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name = '%d%02d%02d' %(year, month, day)
DOY=datetime.strptime(Var_name,'%Y%m%d').timetuple().tm_yday
# We cannot determine transmissivity
Determine_transmissivity = 0
# Determine the transmissivity if possible (Determine_transmissivity = 1)
if Determine_transmissivity == 1:
# Rounded difference of the local time from Greenwich (GMT) (hours):
delta_GTM = round(np.sign(lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)]) * lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)] * 24 / 360)
if np.isnan(delta_GTM) == True:
delta_GTM = round(np.nanmean(lon) * np.nanmean(lon) * 24 / 360)
# Calculate local time
hour += delta_GTM
if hour < 0.0:
day -= 1
hour += 24
if hour >= 24:
day += 1
hour -= 24
# define the kind of sensor and resolution of the sensor
sensor1 = 'PROBAV'
sensor2 = 'VIIRS'
res1 = '375m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
######################## Extract general data from DEM file and create Slope map ##########################################
# Variable date name
Var_name = '%d%02d%02d' %(year, month, day)
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - Original DEM coordinates is Geographic: lat, lon
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1) # Get the reprojected dem band
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
# Read out the DEM band and print the DEM properties
data_DEM = band.ReadAsArray(0, 0, ncol, nrow)
# 2) Latitude file - reprojection
# reproject latitude to the landsat projection and save as tiff file
lat_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
lat_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected latitude data
lat_proy = lat_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# 3) Longitude file - reprojection
# reproject longitude to the landsat projection and save as tiff file
lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected longitude data
lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
lon_fileName = os.path.join(temp_folder_PreSEBAL,'lon_resh.tif')
SEBAL.save_GeoTiff_proy(dest, lon_proy, lon_fileName, shape, nband=1)
# Calculate slope and aspect from the reprojected DEM
deg2rad,rad2deg,slope,aspect=SEBAL.Calc_Gradient(data_DEM, pixel_spacing)
if Determine_transmissivity == 1:
# calculate the coz zenith angle
Ra_mountain_24, Ra_inst, cos_zn_resh, dr, phi, delta = SEBAL.Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect)
cos_zn_fileName = os.path.join(temp_folder_PreSEBAL,'cos_zn.tif')
SEBAL.save_GeoTiff_proy(dest, cos_zn_resh, cos_zn_fileName, shape, nband=1)
# Save the Ra
Ra_inst_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_inst.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_inst, Ra_inst_fileName, shape, nband=1)
Ra_mountain_24_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_mountain_24.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_mountain_24, Ra_mountain_24_fileName, shape, nband=1)
#################### Calculate Transmissivity ##########################################
# Open the General_Input sheet
ws = wb['Meteo_Input']
# Extract the method radiation value
Value_Method_Radiation_inst = '%s' %str(ws['L%d' % number].value)
# Values to check if data is created
Check_Trans_inst = 0
Check_Trans_24 = 0
''' This is now turned of, so you need to fill in the instantanious transmissivity or Radiation
# Extract the data to the method of radiation
if int(Value_Method_Radiation_inst) == 2:
Field_Radiation_inst = '%s' %str(ws['N%d' % number].value)
if Field_Radiation_inst == 'None':
# Instantanious Transmissivity files must be created
Check_Trans_inst = 1
# Calculate Transmissivity
quarters_hours = np.ceil(minutes/30.) * 30
hours_GMT = hour - delta_GTM
if quarters_hours >= 60:
hours_GMT += 1
quarters_hours = 0
# Define the instantanious LANDSAF file
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year, month,day, hours_GMT, quarters_hours)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Reproject the Ra_inst data to match the LANDSAF data
Ra_inst_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_inst_fileName, file_Landsaf_inst, method = 1)
Ra_inst_3Km = Ra_inst_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_inst_3Km[Ra_inst_3Km==0] = np.nan
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_inst_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_inst_3Km = np.float_(Rs_inst_3Km)/10
Rs_inst_3Km[Rs_inst_3Km<0]=np.nan
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_3Km = Rs_inst_3Km/Ra_inst_3Km
Transmissivity_3Km_fileName = os.path.join(output_folder_temp,'Transmissivity_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_inst_3Km_dest, Transmissivity_3Km, Transmissivity_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_inst_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_3Km_fileName, cos_zn_fileName, method = 3)
Transmissivity_inst = Transmissivity_inst_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_inst[Transmissivity_inst>0.98] = 0.98
Transmissivity_inst_fileName = os.path.join(TRANS_outfolder,'Transmissivity_inst_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_inst_dest, Transmissivity_inst, Transmissivity_inst_fileName, shape, nband=1)
'''
# Extract the method radiation value
Value_Method_Radiation_24 = '%s' %str(ws['I%d' % number].value)
# Extract the data to the method of radiation
if int(Value_Method_Radiation_24) == 2:
Field_Radiation_24 = '%s' %str(ws['K%d' % number].value)
if Field_Radiation_24 == 'None':
# Daily Transmissivity files must be created
Check_Trans_24 = 1
# Create times that are needed to calculate daily Rs (LANDSAF)
Starttime_GMT = datetime.strptime(Startdate,'%Y-%m-%d') + timedelta(hours=-delta_GTM)
Endtime_GMT = Starttime_GMT + timedelta(days=1)
Times = pd.date_range(Starttime_GMT, Endtime_GMT,freq = '30min')
for Time in Times[:-1]:
year_LANDSAF = Time.year
month_LANDSAF = Time.month
day_LANDSAF = Time.day
hour_LANDSAF = Time.hour
min_LANDSAF = Time.minute
# Define the instantanious LANDSAF file
#re = glob.glob('')
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year_LANDSAF, month_LANDSAF,day_LANDSAF, hour_LANDSAF, min_LANDSAF)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_one_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_one_3Km = np.float_(Rs_one_3Km)/10
Rs_one_3Km[Rs_one_3Km < 0]=np.nan
if Time == Times[0]:
Rs_24_3Km_tot = Rs_one_3Km
else:
Rs_24_3Km_tot += Rs_one_3Km
Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1])
# Reproject the Ra_inst data to match the LANDSAF data
Ra_24_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_mountain_24_fileName, file_Landsaf_inst, method = 3)
Ra_24_3Km = Ra_24_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_24_3Km[Ra_24_3Km==0] = np.nan
# Do gapfilling
Ra_24_3Km = gap_filling(Ra_24_3Km,np.nan)
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_24_3Km = Rs_24_3Km/Ra_24_3Km
Transmissivity_24_3Km_fileName = os.path.join(temp_folder_PreSEBAL,'Transmissivity_24_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_24_3Km_dest, Transmissivity_24_3Km, Transmissivity_24_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_24_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_24_3Km_fileName, lon_fileName, method = 3)
Transmissivity_24 = Transmissivity_24_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_24[Transmissivity_24>0.98] = 0.98
Transmissivity_24_fileName = os.path.join(TRANS_outfolder,'Transmissivity_24_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_24_dest, Transmissivity_24, Transmissivity_24_fileName, shape, nband=1)
#################### Calculate NDVI for LANDSAT ##########################################
if Image_Type == 1:
# Define bands used for each Landsat number
if Landsat_nr == 5 or Landsat_nr == 7:
Bands = np.array([1, 2, 3, 4, 5, 7, 6])
elif Landsat_nr == 8:
Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11])
else:
print 'Landsat image not supported, use Landsat 7 or 8'
# Open MTL landsat and get the correction parameters
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
Lmin, Lmax, k1_c, k2_c = SEBAL.info_band_metadata(Landsat_meta_fileName, Bands)
# Mean solar exo-atmospheric irradiance for each band (W/m2/microm)
# for the different Landsat images (L5, L7, or L8)
ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44])
ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9])
ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106])
# Open one band - To get the metadata of the landsat images only once (to get the extend)
src_FileName = os.path.join(input_folder, '%s_B2.TIF' % Name_Landsat_Image) # before 10!
ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls = SEBAL.Get_Extend_Landsat(src_FileName)
# Crop the Landsat images to the DEM extent -
dst_FileName = os.path.join(temp_folder_PreSEBAL,'cropped_LS_b2.tif') # Before 10 !!
# Clip the landsat image to match the DEM map
lsc, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(src_FileName, lon_fileName)
data_LS = lsc.GetRasterBand(1).ReadAsArray()
SEBAL.save_GeoTiff_proy(dest, data_LS, dst_FileName, shape, nband=1)
# Get the extend of the remaining landsat file after clipping based on the DEM file
lsc,band_data,ulx,uly,lrx,lry,x_size_lsc,y_size_lsc = SEBAL.Get_Extend_Landsat(dst_FileName)
# Create the corrected signals of Landsat in 1 array
Reflect = SEBAL.Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn_resh,dr,Landsat_nr, cos_zn_fileName)
# Calculate temporal water mask
water_mask_temp=SEBAL.Water_Mask(shape,Reflect)
# Calculate NDVI
NDVI = SEBAL.Calc_NDVI(Reflect)
# Calculate albedo
albedo = SEBAL.Calc_albedo(Reflect)
# Save NDVI
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
# Save albedo
albedo_FileName = os.path.join(Albedo_outfolder,'Albedo_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, albedo, albedo_FileName, shape, nband=1)
################### Extract Meteo data for Landsat days from SEBAL Excel ##################
# Open the Meteo_Input sheet
ws = wb['Meteo_Input']
# ---------------------------- Instantaneous Air Temperature ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Temp_inst = float(ws['B%d' %number].value) # Instantaneous Air Temperature (°C)
# if the data is not a value, than open as a string
except:
Temp_inst_name = '%s' %str(ws['B%d' %number].value)
Temp_inst_fileName = os.path.join(output_folder, 'Temp', 'Temp_inst_input.tif')
Temp_inst = SEBAL.Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, lon_fileName)
try:
RH_inst = float(ws['D%d' %number].value) # Instantaneous Relative humidity (%)
# if the data is not a value, than open as a string
except:
RH_inst_name = '%s' %str(ws['D%d' %number].value)
RH_inst_fileName = os.path.join(output_folder, 'Temp', 'RH_inst_input.tif')
RH_inst = SEBAL.Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, lon_fileName)
esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3))
eact_inst = RH_inst * esat_inst / 100
#################### Calculate NDVI for VIIRS-PROBAV ##########################################
if Image_Type == 2:
if Name_PROBAV_Image == 'None':
offset_all = [-1, 1, -2, 2, -3, 3,-4, 4,-5 ,5 ,-6 , 6, -7, 7, -8, 8]
found_Name_PROBAV_Image = 0
for offset in offset_all:
if found_Name_PROBAV_Image == 1:
continue
else:
try:
Name_PROBAV_Image = SEBAL_RUNS[number + offset]['PROBA_V_name']
if not Name_PROBAV_Image == 'None':
found_Name_PROBAV_Image = 1
except:
pass
# Get the day and time from the PROBA-V
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name_2 = '%d%02d%02d' %(year, month, day)
# Define the output name
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name_2)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name_2)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name_2)
else:
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name)
# vegetation maps that will be generated
if not os.path.exists(NDVI_FileName):
# Define the bands that will be used
bands=['SM', 'B1', 'B2', 'B3', 'B4'] #'SM', 'BLUE', 'RED', 'NIR', 'SWIR'
# Set the index number at 0
index=0
# create a zero array with the shape of the reprojected DEM file
data_PROBAV=np.zeros((shape[1], shape[0]))
spectral_reflectance_PROBAV=np.zeros([shape[1], shape[0], 5])
# constants
n188_float=248 # Now it is 248, but we do not exactly know what this really means and if this is for constant for all images.
# write the data one by one to the spectral_reflectance_PROBAV
for bandnmr in bands:
# Translate the PROBA-V names to the Landsat band names
Band_number = {'SM':7,'B1':8,'B2':10,'B3':9,'B4':11}
# Open the dataset
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
# define data if it is not there yet
if not 'Var_name' in locals():
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[0])
day = int(Date_PROBAV.split("-")[0])
Var_name = '%d%02d%02d' %(year, month, day)
# Open the .hdf file
name_out = os.path.join(input_folder, '%s_test.tif' % (Name_PROBAV_Image))
name_in = g.GetSubDatasets()[Band_number[bandnmr]][0]
# Get environmental variable
SEBAL_env_paths = os.environ["SEBAL"].split(';')
GDAL_env_path = SEBAL_env_paths[0]
GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')
# run gdal translate command
FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, name_out)
SEBAL.Run_command_window(FullCmd)
# Open data
dest_PV = gdal.Open(name_out)
Data = dest_PV.GetRasterBand(1).ReadAsArray()
dest_PV = None
# Remove temporary file
os.remove(name_out)
# Define the x and y spacing
Meta_data = g.GetMetadata()
Lat_Bottom = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LATITUDE'])
Lat_Top = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LATITUDE'])
Lon_Left = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LONGITUDE'])
Lon_Right = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LONGITUDE'])
Pixel_size = float((Meta_data['LEVEL3_GEOMETRY_VNIR_VAA_MAPPING']).split(' ')[-3])
# Define the georeference of the PROBA-V data
geo_PROBAV=[Lon_Left-0.5*Pixel_size, Pixel_size, 0, Lat_Top+0.5*Pixel_size, 0, -Pixel_size] #0.000992063492063
# Define the name of the output file
PROBAV_data_name=os.path.join(input_folder, '%s_%s.tif' % (Name_PROBAV_Image,bandnmr))
dst_fileName=os.path.join(input_folder, PROBAV_data_name)
# create gtiff output with the PROBA-V band
fmt = 'GTiff'
driver = gdal.GetDriverByName(fmt)
dst_dataset = driver.Create(dst_fileName, int(Data.shape[1]), int(Data.shape[0]), 1,gdal.GDT_Float32)
dst_dataset.SetGeoTransform(geo_PROBAV)
# set the reference info
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
dst_dataset.SetProjection(srs.ExportToWkt())
# write the array in the geotiff band
dst_dataset.GetRasterBand(1).WriteArray(Data)
dst_dataset = None
# Open the PROBA-V band in SEBAL
g=gdal.Open(PROBAV_data_name.replace("\\","/"))
# If the data cannot be opened, change the extension
if g is None:
PROBAV_data_name=os.path.join(input_folder, '%s_%s.tiff' % (Name_PROBAV_Image,bandnmr))
# Reproject the PROBA-V band to match DEM's resolution
PROBAV, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(
PROBAV_data_name, lon_fileName)
# Open the reprojected PROBA-V band data
data_PROBAV_DN = PROBAV.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# Define the filename to store the cropped Landsat image
dst_FileName = os.path.join(output_folder, 'Output_PROBAV','proy_PROBAV_%s.tif' % bandnmr)
# close the PROBA-V
g=None
# If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV
if bandnmr is not 'SM':
data_PROBAV[:, :]=data_PROBAV_DN/2000
spectral_reflectance_PROBAV[:, :, index]=data_PROBAV[:, :]
# If the band data is the SM band than write the data into the spectral_reflectance_PROBAV and create cloud mask
else:
data_PROBAV[:, :]=data_PROBAV_DN
Cloud_Mask_PROBAV=np.zeros((shape[1], shape[0]))
Cloud_Mask_PROBAV[data_PROBAV[:,:]!=n188_float]=1
spectral_reflectance_PROBAV[:, :, index]=Cloud_Mask_PROBAV
# Change the spectral reflectance to meet certain limits
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]<=0,np.nan,spectral_reflectance_PROBAV[:, :, index])
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]>=150,np.nan,spectral_reflectance_PROBAV[:, :, index])
# Go to the next index
index=index+1
# Bands in PROBAV spectral reflectance
# 0 = MS
# 1 = BLUE
# 2 = NIR
# 3 = RED
# 4 = SWIR
# Calculate surface albedo based on PROBA-V
Surface_Albedo_PROBAV = 0.219 * spectral_reflectance_PROBAV[:, :, 1] + 0.361 * spectral_reflectance_PROBAV[:, :, 2] + 0.379 * spectral_reflectance_PROBAV[:, :, 3] + 0.041 * spectral_reflectance_PROBAV[:, :, 4]
# Create Water mask based on PROBA-V
water_mask_temp = np.zeros((shape[1], shape[0]))
water_mask_temp[np.logical_and(spectral_reflectance_PROBAV[:, :, 2] >= spectral_reflectance_PROBAV[:, :, 3],data_DEM>0)]=1
# Calculate the NDVI based on PROBA-V
n218_memory = spectral_reflectance_PROBAV[:, :, 2] + spectral_reflectance_PROBAV[:, :, 3]
NDVI = np.zeros((shape[1], shape[0]))
NDVI[n218_memory != 0] = ( spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] - spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] )/ ( spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] + spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] )
# Save Albedo for PROBA-V
SEBAL.save_GeoTiff_proy(dest, Surface_Albedo_PROBAV, Albedo_FileName, shape, nband=1)
# Save NDVI for PROBA-V
SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
# Save Water Mask for PROBA-V
SEBAL.save_GeoTiff_proy(dest, water_mask_temp, water_mask_temp_FileName, shape, nband=1)
else:
dest_NDVI = gdal.Open(NDVI_FileName)
dest_water_mask_temp = gdal.Open(water_mask_temp_FileName)
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
water_mask_temp = dest_water_mask_temp.GetRasterBand(1).ReadAsArray()
############################ Calculate LAI ##########################################
# Calculate the LAI
FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask_temp,shape)
# Create LAI name
if Image_Type == 1:
LAI_FileName = os.path.join(LAI_outfolder,'LAI_LS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
#################### Calculate thermal for Landsat ##########################################
if Image_Type == 1:
# Calculate thermal
therm_data = SEBAL.Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder,ulx_dem,lry_dem,lrx_dem,uly_dem,shape)
# Calculate surface temperature
Surface_temp=SEBAL.Calc_surface_water_temp(Temp_inst,Landsat_nr,Lmax,Lmin,therm_data,b10_emissivity,k1_c,k2_c,eact_inst,shape,water_mask_temp,Bands_thermal,Rp,tau_sky,surf_temp_offset,Image_Type)
# Save surface temperature
therm_data_FileName = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_LS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, Surface_temp, therm_data_FileName, shape, nband=1)
################################## Calculate VIIRS surface temperature ########################
if Image_Type == 2:
# If there is VIIRS data
if not Name_VIIRS_Image_TB == 'None':
# Define the VIIRS thermal data name
VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_TB))
# Reproject VIIRS thermal data onto the example grid (lon_fileName)
VIIRS, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(VIIRS_data_name, lon_fileName)
# Open VIIRS thermal data
data_VIIRS = VIIRS.GetRasterBand(1).ReadAsArray()
# Define the thermal VIIRS output name
proyVIIRS_fileName = os.path.join(temp_folder_PreSEBAL, 'Surface_Temp_VIIRS_%s.tif' %Var_name)
# Save the thermal VIIRS data
SEBAL.save_GeoTiff_proy(dest, data_VIIRS, proyVIIRS_fileName, shape, nband=1)
# Set the conditions for the brightness temperature (100m); values below 250 K are treated as invalid
brightness_temp=np.where(data_VIIRS>=250, data_VIIRS, np.nan)
# Constants (VIIRS band calibration constants k1/k2)
k1=606.399172
k2=1258.78
# Invert brightness temperature back to spectral radiance via Planck's law
# at a central wavelength of 11.45 micrometer (result in W/m2/sr/um)
L_lambda_b10_100=((2*6.63e-34*(3.0e8)**2)/((11.45e-6)**5*(np.exp((6.63e-34*3e8)/(1.38e-23*(11.45e-6)*brightness_temp))-1)))*1e-6
# Get Temperature for 100 and 375m resolution
Temp_TOA_100 = SEBAL.Get_Thermal(L_lambda_b10_100,Rp,Temp_inst,tau_sky,tir_emis,k1,k2)
# Conditions for surface temperature (100m): clamp to the physically plausible 250-450 K range
n120_surface_temp=Temp_TOA_100.clip(250, 450)
# Save the surface temperature of the VIIRS in 100m resolution
temp_surface_100_fileName_beforeTS = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_VIIRS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, n120_surface_temp, temp_surface_100_fileName_beforeTS, shape, nband=1)
###################################################################################################################
################################################### HANTS part 4 ##################################################
###################################################################################################################
# Select files for PROBA-V that needs to be used (sometimes a composite product is used)
# NOTE(review): dict.iteritems() is Python-2-only; this script will not run on Python 3 as-is
PROBA_V_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
if str(v['PROBA_V_name']) != 'None':
PROBA_V_Dict.setdefault(v['PROBA_V_name'], []).append(k)
Amount_Unique_PROBA_V_images = len(PROBA_V_Dict.keys())
Back_names = []
# Define HANTS PROBA-V variables to preprocess
VARS = ["NDVI", "Albedo"]
for VAR in VARS:
output_folder_preprocessing_VAR = os.path.join(output_folder_PreSEBAL_SEBAL, VAR)
os.chdir(output_folder_preprocessing_VAR)
for PROBA_V_image in PROBA_V_Dict.keys():
Band_PROBAVhdf_fileName = os.path.join(input_folder_SEBAL, '%s.HDF5' % (PROBA_V_image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
# Acquisition date is taken from the HDF5 metadata (format YYYY-MM-DD)
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Back_name = '%s_PROBAV_%d%02d%02d.tif' %(VAR, year, month, day)
# Copy the preprocessed raster into the HANTS input folder for this variable
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
if not os.path.exists(input_folder_HANTS_VAR):
os.mkdir(input_folder_HANTS_VAR)
shutil.copy(os.path.join(output_folder_preprocessing_VAR,Back_name),os.path.join(input_folder_HANTS_VAR,Back_name))
# VIIRS parameter copy: collect the unique VIIRS thermal images used by the SEBAL runs
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
if str(v['VIIRS_name']) != 'None':
VIIRS_Dict.setdefault(v['VIIRS_name'], []).append(k)
THERM = 'Surface_Temperature'
output_folder_preprocessing_THERM = os.path.join(output_folder_PreSEBAL_SEBAL, THERM)
for VIIRS_image in VIIRS_Dict.keys():
# Two known VIIRS filename conventions: date after "d" or as the 4th "_" token
try:
Date_VIIRS = (VIIRS_image.split("d")[1])
year = int(Date_VIIRS.split("-")[0][0:4])
month = int(Date_VIIRS.split("-")[0][4:6])
day = int(Date_VIIRS.split("-")[0][6:8])
except:
Date_VIIRS = (VIIRS_image.split("_")[3])
year = int(Date_VIIRS.split("-")[0][0:4])
month = int(Date_VIIRS.split("-")[0][4:6])
day = int(Date_VIIRS.split("-")[0][6:8])
Back_name_TB = '%s_VIIRS_%d%02d%02d.tif' %(THERM, year, month, day)
# Copy the surface-temperature raster into the HANTS input folder
input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
if not os.path.exists(input_folder_HANTS_THERM):
os.mkdir(input_folder_HANTS_THERM)
shutil.copy(os.path.join(output_folder_preprocessing_THERM,Back_name_TB),os.path.join(input_folder_HANTS_THERM,Back_name_TB))
############################################ Solve shift in PROBA-V ##############################################
# Detect a one-row vertical shift between the first Albedo image and each later
# image: if the correlation improves when one image is shifted by one row,
# shift all variables of that date by one row as well.
VAR = 'Albedo'
os.chdir(os.path.join(temp_folder_PreSEBAL, VAR))
re = glob.glob('%s*.tif' %(VAR))
i = 0
while i < int(len(re)-1):
filename1 = re[0] # TODO: maybe later only use index 0 when its amount of valid pixels is less than 40% of the total
filename2 = re[i + 1]
dest1 = gdal.Open(filename1)
dest2 = gdal.Open(filename2)
# Unshifted pixel pairs
Array1 = dest1.GetRasterBand(1).ReadAsArray().flatten()
Array2 = dest2.GetRasterBand(1).ReadAsArray().flatten()
# Pixel pairs with image 2 shifted one row down relative to image 1
Array3 = dest1.GetRasterBand(1).ReadAsArray()[1:,:].flatten()
Array4 = dest2.GetRasterBand(1).ReadAsArray()[:-1,:].flatten()
# Keep only pairs where both images have valid (non-NaN) values
Array1_flat = Array1[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
Array2_flat = Array2[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
Array3_flat = Array3[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
Array4_flat = Array4[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
Corr = np.corrcoef(Array1_flat,Array2_flat)[0,1]
Corr2 = np.corrcoef(Array3_flat,Array4_flat)[0,1]
# If the shifted correlation is better, apply the one-row shift to every variable of this date
if Corr2 > Corr:
x,y = dest1.GetRasterBand(1).ReadAsArray().shape
for VAR_check in VARS:
os.chdir(os.path.join(temp_folder_PreSEBAL, VAR_check))
endname = filename2.split('_')[-1]
re_vars = glob.glob('%s*_%s' %(VAR_check,endname))
filename3 = re_vars[0]
dest3 = gdal.Open(filename3)
# Shift the raster one row down; the first row becomes NaN
New_Array = np.ones(dest1.GetRasterBand(1).ReadAsArray().shape) * np.nan
New_Array[1:,:] = dest3.GetRasterBand(1).ReadAsArray()[:-1,:]
filename_out = os.path.join(temp_folder_PreSEBAL, VAR_check, filename3)
SEBAL.save_GeoTiff_proy(dest3, New_Array, filename_out, [int(y),int(x)], nband=1)
i += 1
################################################### General HANTS ###############################################
# Open one example image of each sensor to derive grid extent, cell size and projection
PROBA_V_IMAGE = os.path.join(input_folder_HANTS_VAR,Back_name)
destPROBAV = gdal.Open(PROBA_V_IMAGE)
VIIRS_IMAGE = os.path.join(input_folder_HANTS_THERM,Back_name_TB)
destVIIRS = gdal.Open(VIIRS_IMAGE)
# Get Geotransform (origin, pixel size) and raster dimensions
Geo_PROBAV = destPROBAV.GetGeoTransform()
x_size_PROBAV = destPROBAV.RasterXSize
y_size_PROBAV = destPROBAV.RasterYSize
Geo_VIIRS = destVIIRS.GetGeoTransform()
x_size_VIIRS = destVIIRS.RasterXSize
y_size_VIIRS = destVIIRS.RasterYSize
# Get projection (EPSG code)
proj = Get_epsg(destPROBAV)
projVIIRS = Get_epsg(destVIIRS)
# Data parameters: lat/lon bounding boxes and cell sizes per sensor
latlim = [Geo_PROBAV[3] + y_size_PROBAV * Geo_PROBAV[5],Geo_PROBAV[3]]
lonlim = [Geo_PROBAV[0], Geo_PROBAV[0] + x_size_PROBAV * Geo_PROBAV[1]]
cellsize = Geo_PROBAV[1]
latlimVIIRS = [Geo_VIIRS [3] + y_size_VIIRS * Geo_VIIRS [5],Geo_VIIRS [3]]
lonlimVIIRS = [Geo_VIIRS [0], Geo_VIIRS [0] + x_size_VIIRS * Geo_VIIRS [1]]
cellsizeVIIRS = Geo_VIIRS [1]
# Get the HANTS parameters from the Excel sheet
ws_para = wb_veg['HANTS_Input']
# Daily time steps determine the amount of images in the HANTS stack
Dates = pd.date_range(start_date, end_date, freq = 'D')
###################################################### HANTS Thermal ###############################################
# Define parameters for the surface temperature
THERM = 'Surface_Temperature'
# Define paths for the thermal input and output NetCDF
input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
name_format = '%s_VIIRS_{0}.tif' %THERM
nc_path_TB = os.path.join(input_folder_HANTS_THERM,'%s_NC.nc' %THERM)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for the thermal data (Excel column D)
nb = int(len(Dates))
Dates = pd.date_range(start_date, end_date, freq = 'D') # NOTE: redundant, Dates was already defined above
nf = int(ws_para['D2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['D3'].value) # valid range minimum
high = float(ws_para['D4'].value) # valid range maximum
HiLo = str(ws_para['D5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['D6'].value) # fit error tolerance (points deviating more than fet from curve fit are rejected)
delta = float(ws_para['D7'].value) # small positive number e.g. 0.1 to suppress high amplitudes
dod = float(ws_para['D8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from hants import wa_gdal
# Run HANTS on the VIIRS surface-temperature stack
wa_gdal.run_HANTS(input_folder_HANTS_THERM, name_format,
start_date, end_date, latlimVIIRS, lonlimVIIRS, cellsizeVIIRS, nc_path_TB,
nb, nf, HiLo, low, high, fet, dod, delta,
projVIIRS, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS NDVI ###############################################
# Define parameters for the NDVI
VAR = 'NDVI'
# Define paths for NDVI input and output NetCDF
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_ndvi = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for NDVI (Excel column C) # Dates = pd.date_range(start_date, end_date, freq = '5D')
nb = int(len(Dates)) # nr of images
nf = int(ws_para['C2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['C3'].value) # valid range minimum
high = float(ws_para['C4'].value) # valid range maximum
HiLo = str(ws_para['C5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['C6'].value) # fit error tolerance (points deviating more than fet from curve fit are rejected)
delta = float(ws_para['C7'].value) # small positive number e.g. 0.1 to suppress high amplitudes
dod = float(ws_para['C8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from hants import wa_gdal
# Run HANTS on the PROBA-V NDVI stack
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path_ndvi,
nb, nf, HiLo, low, high, fet, dod, delta,
proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS Albedo ##############################################
# Define parameters for the albedo
VAR = 'Albedo'
# Define paths for Albedo input and output NetCDF
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_albedo = os.path.join(temp_folder_PreSEBAL,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(output_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for Albedo (Excel column B)
Dates = pd.date_range(start_date, end_date, freq = 'D')
nb = int(len(Dates)) # nr of images
nf = int(ws_para['B2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['B3'].value) # valid range minimum
high = float(ws_para['B4'].value) # valid range maximum
HiLo = str(ws_para['B5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['B6'].value) # fit error tolerance (points deviating more than fet from curve fit are rejected)
delta = float(ws_para['B7'].value) # small positive number e.g. 0.1 to suppress high amplitudes
dod = float(ws_para['B8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from hants import wa_gdal
# Run HANTS on the PROBA-V Albedo stack
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path_albedo,
nb, nf, HiLo, low, high, fet, dod, delta,
proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################################################################################
################################################### post HANTS part 5 #############################################
###################################################################################################################
############################################# Create Outlier maps for PROBA-V #######################################
# Create output folder if not exists
output_folder_HANTS_outliers_PROBAV = os.path.join(temp_folder_PreSEBAL, 'Outliers_PROBAV')
if not os.path.exists(output_folder_HANTS_outliers_PROBAV):
os.mkdir(output_folder_HANTS_outliers_PROBAV)
# Read the HANTS NetCDF; variable order is assumed to be [.., lat, lon, time, .., outliers]
# NOTE(review): dict.keys()[i] only works on Python 2 where keys() returns a list
fh = Dataset(nc_path_albedo, mode='r')
Var = fh.variables.keys()[-1]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
# Fall back to an example raster for shape/projection when none was set earlier
# NOTE(review): 'or' means this only triggers when BOTH names are missing; if only one
# of 'shape'/'dest' exists the code below may still fail -- 'and' looks intended, confirm
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR, Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
# Write one outlier GeoTIFF per timestep
for i in range(0,int(np.shape(time)[0])):
time_now = time[i]
data = fh.variables['outliers'][i,:,:]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers_PROBAV, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create ALBEDO and NDVI #########################################
# Merge the original PROBA-V rasters with the HANTS-gap-filled rasters date by date:
# good original pixels are kept, outlier pixels are replaced by (bias-corrected) HANTS values
for date in Dates:
# Define date
year = date.year
month = date.month
day = date.day
# input filenames needed for creating the merged end files
filename_outliers = os.path.join(output_folder_HANTS_outliers_PROBAV,"Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day))
VAR = 'Albedo'
input_folder_PreSEBAL_ALBEDO = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
filename_Albedo_original = os.path.join(Albedo_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
filename_Albedo_HANTS = os.path.join(input_folder_PreSEBAL_ALBEDO, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
VAR = 'NDVI'
input_folder_PreSEBAL_NDVI = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
filename_NDVI_original = os.path.join(NDVI_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
filename_NDVI_HANTS = os.path.join(input_folder_PreSEBAL_NDVI, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
# Open the input filenames (gdal.Open returns None for missing files)
dest_outliers = gdal.Open(filename_outliers)
dest_PROBAV_ALBEDO = gdal.Open(filename_Albedo_original)
dest_PROBAV_NDVI = gdal.Open(filename_NDVI_original)
dest_HANTS_ALBEDO = gdal.Open(filename_Albedo_HANTS)
dest_HANTS_NDVI = gdal.Open(filename_NDVI_HANTS)
# If original exists, this will be the basis for the merged map
if not dest_PROBAV_ALBEDO == None:
# Open arrays of the input files
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_ALBEDO_original = dest_PROBAV_ALBEDO.GetRasterBand(1).ReadAsArray()
Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()[:,:]
Array_NDVI_original = dest_PROBAV_NDVI.GetRasterBand(1).ReadAsArray()
Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()[:,:]
# Create outlier Mask: 1 = good pixel, 0 = outlier (the double swap inverts the raw flags)
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
# Create a buffer zone around the bad pixels (the mask is inverted before and after buffering)
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
# If there are more than 300 Good pixels
if np.nansum(Array_outliers_mask) > 300:
# Use the mask to find the good original pixels and HANTS pixels
Array_ALBEDO_original_mask_nan = Array_ALBEDO_original * Array_outliers_mask
Array_ALBEDO_HANTS_mask_nan = Array_ALBEDO_HANTS * Array_outliers_mask
Array_NDVI_original_mask_nan = Array_NDVI_original * Array_outliers_mask
Array_NDVI_HANTS_mask_nan = Array_NDVI_HANTS * Array_outliers_mask
# Create a 1D array of those pixels
Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan.flatten()
Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan.flatten()
Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan.flatten()
Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan.flatten()
# Remove pixels with out-of-range values (valid window [-0.2, 0.6])
Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten<-0.2] = np.nan
Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten>0.6] = np.nan
Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten<-0.2] = np.nan
Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten>0.6] = np.nan
Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten<-0.2] = np.nan
Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten>0.6] = np.nan
Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten<-0.2] = np.nan
Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten>0.6] = np.nan
# Remove entries where EITHER array of a pair is NaN (keep only pairs where
# both the original and the HANTS value are valid).
# BUG FIX: the original code used np.logical_or(~isnan, ~isnan), which keeps
# entries where only one of the two arrays is valid and therefore leaves NaNs
# in the paired arrays; np.logical_and matches the stated intent and the
# equivalent PROBA-V shift check earlier in this script.
valid_albedo_pair = np.logical_and(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))
valid_ndvi_pair = np.logical_and(~np.isnan(Array_NDVI_original_mask_nan_flatten),~np.isnan(Array_NDVI_HANTS_mask_nan_flatten))
Array_ALBEDO_original_mask_nan_flatten2 = Array_ALBEDO_original_mask_nan_flatten[valid_albedo_pair]
Array_ALBEDO_HANTS_mask_nan_flatten2 = Array_ALBEDO_HANTS_mask_nan_flatten[valid_albedo_pair]
Array_NDVI_original_mask_nan_flatten2 = Array_NDVI_original_mask_nan_flatten[valid_ndvi_pair]
Array_NDVI_HANTS_mask_nan_flatten2 = Array_NDVI_HANTS_mask_nan_flatten[valid_ndvi_pair]
Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan_flatten2
Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan_flatten2
Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan_flatten2
Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan_flatten2
# Remove all zero values (zeros stem from the mask multiplication, not real data)
Array_ALBEDO_original_mask_nan_flatten_without_zero =Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten != 0.0]
Array_NDVI_original_mask_nan_flatten_without_zero =Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten != 0.0]
# Calculate the value of the 40 and 90 percent percentiles of the original arrays good pixels
Array_ALBEDO_original_mask_value_cold = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,40)
Array_ALBEDO_original_mask_value_hot = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,90)
Array_NDVI_original_mask_value_cold = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,40)
Array_NDVI_original_mask_value_hot = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,90)
# Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas)
Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
Array_ALBEDO_original_mask_nan_flatten_exc_coldest = Array_ALBEDO_original_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
Array_NDVI_HANTS_mask_nan_flatten_exc_coldest = Array_NDVI_HANTS_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
Array_NDVI_original_mask_nan_flatten_exc_coldest = Array_NDVI_original_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
# Calculate the mean of those arrays
Ave_ALBEDO_HANTS = np.nanmean(Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest)
Ave_ALBEDO_original = np.nanmean(Array_ALBEDO_original_mask_nan_flatten_exc_coldest)
Ave_NDVI_HANTS = np.nanmean(Array_NDVI_HANTS_mask_nan_flatten_exc_coldest)
Ave_NDVI_original = np.nanmean(Array_NDVI_original_mask_nan_flatten_exc_coldest)
# Calculate the correction (bias) factor for the simulated image
Factor_Albedo = Ave_ALBEDO_original/Ave_ALBEDO_HANTS
Factor_NDVI = Ave_NDVI_original/Ave_NDVI_HANTS
# Apply this factor over the simulated HANTS image
Array_ALBEDO_HANTS_Corrected = Array_ALBEDO_HANTS * Factor_Albedo
Array_NDVI_HANTS_Corrected = Array_NDVI_HANTS * Factor_NDVI
# Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values
End_array_Albedo = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array_Albedo[Array_outliers_mask==0] =Array_ALBEDO_HANTS_Corrected[Array_outliers_mask==0]
End_array_Albedo[Array_outliers_mask==1] =Array_ALBEDO_original[Array_outliers_mask==1]
End_array_NDVI = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array_NDVI[Array_outliers_mask==0] =Array_NDVI_HANTS_Corrected[Array_outliers_mask==0]
End_array_NDVI[Array_outliers_mask==1] =Array_NDVI_original[Array_outliers_mask==1]
# If the original image is too bad, then replace the whole image by the simulated HANTS image
else:
End_array_Albedo = Array_ALBEDO_HANTS
End_array_NDVI = Array_NDVI_HANTS
# Get the geolocation information of the image
geo = dest_PROBAV_ALBEDO.GetGeoTransform()
proj = dest_outliers.GetProjection()
# If there is no original image, use the simulated HANTS image
else:
Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()
End_array_Albedo = Array_ALBEDO_HANTS
Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()
End_array_NDVI = Array_NDVI_HANTS
# Search for the first date that has an image to take the geolocation from
# NOTE(review): if no file opens at all, Dates[i] eventually raises IndexError inside
# the try, which is swallowed by the bare except -- this loop then never terminates
dest_test = None
i = 0
while dest_test == None:
# Get the date of the first image that exists to get the geolocation information
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_ALBEDO_original2 = os.path.join(input_folder_PreSEBAL_ALBEDO, "Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2))
dest_test = gdal.Open(filename_ALBEDO_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
# Save the merged end arrays
output_name_end_ALBEDO = os.path.join(ALBEDO_outfolder_end, "Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array_Albedo, output_name_end_ALBEDO, shape, nband=1)
output_name_end_NDVI = os.path.join(NDVI_outfolder_end, "NDVI_PROBAV_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array_NDVI, output_name_end_NDVI, shape, nband=1)
############################################# Create Outlier maps for VIIRS #########################################
# Create output folder if not exists
output_folder_HANTS_outliers_VIIRS = os.path.join(temp_folder_PreSEBAL, 'Outliers_VIIRS')
if not os.path.exists(output_folder_HANTS_outliers_VIIRS):
os.mkdir(output_folder_HANTS_outliers_VIIRS)
# Read the thermal HANTS NetCDF; same assumed variable ordering as for PROBA-V
# NOTE(review): dict.keys()[i] only works on Python 2 where keys() returns a list
fh = Dataset(nc_path_TB, mode='r')
Var = fh.variables.keys()[-1]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
# Fall back to an example raster for shape/projection when none was set earlier
# NOTE(review): 'or' only triggers when BOTH names are missing; 'and' looks intended, confirm
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_THERM,Back_name_TB)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
# Write one VIIRS outlier GeoTIFF per timestep
for i in range(0,int(np.shape(time)[0])):
    time_now = time[i]
    # 'outliers' is indexed [time, lat, lon]; slicing axis 0 yields the 2D map for this timestep
    data = fh.variables['outliers'][i,:,:]
    geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
    name_out = os.path.join(output_folder_HANTS_outliers_VIIRS, 'Outliers_VIIRS_%s.tif' %time_now)
    # BUG FIX: 'data' is already the 2D slice for timestep i; the original passed
    # data[:,:,i], which indexes the 2D array with a third subscript (IndexError /
    # wrong pixels). Save 'data' directly, consistent with the PROBA-V outlier loop.
    SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create end thermal #########################################
# Create the end thermal files date by date: good original VIIRS pixels are kept,
# outlier pixels are replaced by bias-corrected HANTS values
for date in Dates:
# Define date
year = date.year
month = date.month
day = date.day
# input filenames needed for creating end thermal file
filename_outliers = os.path.join(output_folder_HANTS_outliers_VIIRS,"Outliers_VIIRS_%d%02d%02d.tif" %(year,month,day))
filename_VIIRS_original = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
filename_VIIRS_HANTS = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS", "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
# Open the input filenames (gdal.Open returns None for missing files)
dest_outliers = gdal.Open(filename_outliers)
dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
# If original exists, this will be the basis for the end thermal map
if not dest_VIIRS_original == None:
# Open arrays of the input files
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
# Create outlier Mask: 1 = good pixel, 0 = outlier (the double swap inverts the raw flags)
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
# Create a buffer zone around the bad pixels (the mask is inverted before and after buffering)
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
# If there are more than 300 Good pixels
if np.nansum(Array_outliers_mask) > 300:
# Use the mask to find the good original pixels and HANTS pixels
Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
# Create a 1D array of those pixels
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
# Remove pixels outside the plausible temperature window (250-350 K)
Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten<250] = np.nan
Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten>350] = np.nan
Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten<250] = np.nan
Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>350] = np.nan
# Remove the nan values
# NOTE(review): each array is filtered by its OWN nan mask, so the two arrays can end
# up with different lengths, which would break the paired logical_and filtering below;
# the pairwise filtering used for Albedo/NDVI looks intended here as well -- confirm
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan_flatten[~np.isnan(Array_VIIRS_original_mask_nan_flatten)]
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten)]
# Remove all zero values (zeros stem from the mask multiplication, not real data)
Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>0]
# Calculate the value of the 40 and 90 percent percentiles of the original arrays good pixels
Array_VIIRS_original_mask_value_cold = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
Array_VIIRS_original_mask_value_hot = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
# Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas)
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
# Calculate the mean of those arrays
Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
# Calculate the correction (bias) factor for the simulated image
Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
# Apply this factor over the simulated HANTS image
Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
# Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values
End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
# If the original image is too bad, then replace the whole image by the simulated HANTS image
else:
End_array = Array_VIIRS_HANTS
# Get the geolocation information of the image
geo = dest_VIIRS_original.GetGeoTransform()
proj = dest_outliers.GetProjection()
# If there is no original image, use the simulated HANTS image
else:
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
End_array = Array_VIIRS_HANTS
# Search for the first date that has an image to take the geolocation from
# NOTE(review): if no file opens at all, Dates[i] eventually raises IndexError inside
# the try, which is swallowed by the bare except -- this loop then never terminates
dest_test = None
i = 0
while dest_test == None:
# Get the date of the first image that exists to get the geolocation information
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_VIIRS_original2 = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year2,month2,day2))
dest_test = gdal.Open(filename_VIIRS_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
# Save the end array
output_name_end_LST = os.path.join(temp_folder_PreSEBAL_LST, "VIIRS_LST_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array, output_name_end_LST, shape, nband=1)
###################################################################################################################
###################################################### preSEBAL continue ##########################################
###################################################################################################################
############################################### Apply thermal sharpening ##########################################
print '---------------------------------------------------------'
print '-------------------- Downscale VIIRS --------------------'
print '---------------------------------------------------------'
# Upscale VIIRS and PROBA-V to 400m
pixel_spacing_upscale = 400
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the input and output folder, and Image type from the excel file
DEM_fileName = str(ws['E2'].value)
ws = wb['VIIRS_PROBAV_Input']
UTM_Zone = int(str(ws['G2'].value))
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - Original DEM coordinates is Geographic: lat, lon
proyDEM_fileName_100 = os.path.join(temp_folder_PreSEBAL,'DEM_100.tif')
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing = 100, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1) # Get the reprojected dem band
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
DEM = band.ReadAsArray()
# Save DEM file with the 100 meter resolution
SEBAL.save_GeoTiff_proy(dest, DEM, proyDEM_fileName_100, shape, nband=1)
# Create upscaled DEM
proyDEM_fileName_400 = os.path.join(temp_folder_PreSEBAL,'DEM_400.tif')
dest_400, ulx_dem_400, lry_dem_400, lrx_dem_400, uly_dem_400, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
# find spatial parameters array
DEM_400 = dest_400.GetRasterBand(1).ReadAsArray()
Y_raster_size_400 = dest_400.RasterYSize
X_raster_size_400 = dest_400.RasterXSize
shape_400=([X_raster_size_400, Y_raster_size_400])
# Save DEM file with the 400 meter resolution
SEBAL.save_GeoTiff_proy(dest_400, DEM_400, proyDEM_fileName_400, shape_400, nband=1)
for date in Dates:
surf_temp_fileName = os.path.join(temp_folder_PreSEBAL, 'Surf_temp_After_TS_%d%02d%02d.tif' %(date.year, date.month, date.day))
temp_surface_100_fileName_beforeTS = os.path.join(temp_folder_PreSEBAL_LST,'VIIRS_LST_%d%02d%02d.tif' %(date.year, date.month, date.day))
################################ Thermal Sharpening #####################################################
# Define filename
file_NDVI_after_HANTS = os.path.join(NDVI_outfolder_end, 'NDVI_PROBAV_%d%02d%02d.tif' %(date.year, date.month, date.day))
# Open NDVI/LST destination folder
dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
dest_LST = gdal.Open(temp_surface_100_fileName_beforeTS)
# Open NDVI array
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
# Open LST array
LST = dest_LST.GetRasterBand(1).ReadAsArray()
# Upscale thermal band VIIRS from 100m to 400m
VIIRS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
temp_surface_100_fileName_beforeTS, proyDEM_fileName_400)
data_Temp_Surf_400 = VIIRS_Upscale.GetRasterBand(1).ReadAsArray()
# Upscale PROBA-V NDVI from 100m to 400m
NDVI_PROBAV_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
file_NDVI_after_HANTS, proyDEM_fileName_400)
data_NDVI_400 = NDVI_PROBAV_Upscale.GetRasterBand(1).ReadAsArray()
# Define the width of the moving window box
Box=9
# Apply the surface temperature sharpening
temp_surface_sharpened = SEBAL.Thermal_Sharpening(data_Temp_Surf_400, data_NDVI_400, NDVI, Box, NDVI_PROBAV_Upscale, output_folder, proyDEM_fileName_100, shape, dest, surf_temp_fileName)
# Create Water mask based on HANTS NDVI output
water_mask = np.zeros((shape[1], shape[0]))
water_mask[NDVI<0.0]=1
# Divide temporal watermask in snow and water mask by using surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = SEBAL.CalculateSnowWaterMask(NDVI,shape,water_mask,temp_surface_sharpened)
# Replace water values
temp_surface_sharpened[water_mask==1] = LST[water_mask == 1]
temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened), LST, temp_surface_sharpened)
surf_temp_fileName = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%d%02d%02d.tif' %(date.year, date.month, date.day))
SEBAL.save_GeoTiff_proy(dest, temp_surface_sharpened, surf_temp_fileName, shape, nband=1)
################################################## Calculate LAI ##################################################
# Open NDVI destination folder
dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
# Open NDVI array
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
LAI_FileName = os.path.join(LAI_outfolder,'LAI_%d%02d%02d.tif' %(date.year, date.month, date.day))
# Calculate LAI
FPAR, tir_emis, Nitrogen, vegt_cover, LAI, b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask, shape)
SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
################################ Calculate the Vegetation height ########################
# Open preprosessing excel the Vegetation_Height sheet
ws_veg = wb_veg['Vegetation_Height']
# Define output name for the LandUse map
dst_FileName = os.path.join(output_folder,'LU.tif')
# Open LU data
LU_dest = gdal.Open(LU_data_FileName)
LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
# Reproject the LAI to the same projection as LU
dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
# Read out the excel file coefficient numbers
Array = np.zeros([ws_veg.max_row-1,4])
for j in ['A','C','D','E']:
j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
for i in range(2,ws_veg.max_row+1):
Value = (ws_veg['%s%s' %(j,i)].value)
Array[i-2, j_number[j]] = Value
# Create maps with the coefficient numbers for the right land cover
coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
for coeff_nmbr in range(0,3):
for Class in range(0,len(Array)):
coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
# Get some dimensions of the projected dataset
band_data = dest1.GetRasterBand(1)
ncol_data = dest1.RasterXSize
nrow_data = dest1.RasterYSize
shape_data=[ncol_data, nrow_data]
# Calculate the vegetation height in the LU projection
Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
# Save the vegetation height in the lU projection in the temporary directory
Veg_Height_proj_FileName = os.path.join(temp_folder_PreSEBAL,'Veg_Height_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
# Reproject the Veg_height to the LAI projection
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
# Get some dimensions of the original dataset
band_data = dest.GetRasterBand(1)
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
# Open the Veg_height with the same projection as LAI
Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
Veg_Height[Veg_Height == 0] = 0.4
# Save Vegetation Height in the end folder
dst_FileName = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%d%02d%02d.tif' %(date.year, date.month, date.day))
SEBAL.save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate Water Mask #########################
# Open all the water mask
os.chdir(WaterMask_outfolder)
re_water_mask = glob.glob('Water_Mask*.tif')
# Loop over all the files
for water_mask_filename in re_water_mask:
# Create the filepath to the water mask
water_mask_filepath = os.path.join(WaterMask_outfolder,water_mask_filename)
# Open Array
water_mask_dest = gdal.Open(water_mask_filepath)
# If the total water mask raster does not exists create this one
if not 'water_mask_array' in locals():
water_mask_array = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
# Add all the water masks
water_mask_array += water_mask_dest.GetRasterBand(1).ReadAsArray()
# Calculate the end water mask if the area is more than 50 percent defined as water
water_mask_array_per = water_mask_array/len(re_water_mask)
water_mask_array_end = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
water_mask_array_end[water_mask_array_per > 0.5] = 1
# Save water mask
WaterMask_outfolder_end_FileName = os.path.join(WaterMask_outfolder_end,'Water_Mask.tif')
SEBAL.save_GeoTiff_proy(dest, water_mask_array_end, WaterMask_outfolder_end_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_p.max_row+1):
Value = (ws_p['%s%s' %(j,i)].value)
Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = 0.5
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'p-factor_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = 0.5
dst_pfactor_FileName = os.path.join(output_folder_p_factor,'p_factor.tif')
SEBAL.save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_c.max_row+1):
Value = (ws_c['%s%s' %(j,i)].value)
Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = 2.5
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'LUE_max_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = 2.5
dst_LUEmax_FileName = os.path.join(output_folder_LUE,'LUE_max.tif')
SEBAL.save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
####################################################################################################################
################################################ Write output part 6 ###############################################
####################################################################################################################
############################################# Fill in the additional input sheet #########################################
# things to be filled in:
# Transmissivity (optional)
# NDVI (additional input)
# Albedo (additional input)
# LST (additional input)
# Water Mask (additional input)
# p-factor (soil input)
# c-factor (soil input)
# Vegetation height (meteo input)
# VIIRS parameter copy
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
VIIRS_Dict.setdefault(v['output_folder'], []).append(k)
'''
LST folder = output_folder_HANTS_end
NDVI folder = os.path.join(output_folder_HANTS, 'NDVI')
ALBEDO folder = os.path.join(output_folder_HANTS, 'Albedo')
SAVI folder = os.path.join(output_folder_HANTS, 'SAVI')
'''
VARS = ["NDVI", "Albedo"]
Letter_dict = {"NDVI":'B', "Albedo":'D'}
xfile = load_workbook(inputExcel)
sheet_additional = xfile.get_sheet_by_name('Additional_Input')
sheet_meteo = xfile.get_sheet_by_name('Meteo_Input')
sheet_soil = xfile.get_sheet_by_name('Soil_Input')
sheet_out_name = ''.join([os.path.splitext(os.path.basename(inputExcel))[0],'_SEBAL.xlsx'])
sheet_out_dir = os.path.dirname(inputExcel)
sheet_out_file_name = os.path.join(sheet_out_dir, sheet_out_name)
for output_name_run in VIIRS_Dict.keys()[2:4]:
# Get General parameters
Row_number = VIIRS_Dict[output_name_run][0]
Type_of_Run = SEBAL_RUNS.items()
VIIRS_date = output_name_run.split('_')[-1]
VIIRS_datetime= datetime.strptime(VIIRS_date, '%d%m%Y')
date_run = '%d%02d%02d' %(VIIRS_datetime.year,VIIRS_datetime.month,VIIRS_datetime.day)
# import LST
file_name_LST = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%s.tif' %date_run )
sheet_additional['E%d'%(Row_number)] = str(file_name_LST)
# import NDVI and Albedo and water mask
for VAR_SINGLE in VARS:
Letter = Letter_dict[VAR_SINGLE]
file_name_VAR_single = os.path.join(output_folder_PreSEBAL, VAR_SINGLE, '%s_PROBAV_%s.tif' %(VAR_SINGLE, date_run))
sheet_additional['%s%d'%(Letter, Row_number)] = str(file_name_VAR_single)
# import Water Mask
sheet_additional['C%d'%(Row_number)] = str(WaterMask_outfolder_end_FileName)
# import p-factor
file_name_p_factor = os.path.join(output_folder_p_factor,'p_factor.tif')
sheet_soil['H%d'%(Row_number)] = str(file_name_p_factor)
# import p-factor
file_name_c_factor = os.path.join(output_folder_LUE, 'LUE_max.tif')
sheet_soil['I%d'%(Row_number)] = str(file_name_c_factor)
# import vegetation height
file_name_vegt_height = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%s.tif' %date_run)
sheet_meteo['O%d'%(Row_number)] = str(file_name_vegt_height)
xfile.save(sheet_out_file_name)
'''
# If instantanious Transmissivity is calculated in PreSEBAL
if Check_Trans_inst == 1:
sheet['N%d'%(number)] = str(Transmissivity_inst_fileName)
xfile.save(inputExcel)
# If daily Transmissivity is calculated in PreSEBAL
if Check_Trans_24 == 1:
sheet_meteo['K%d'%(number)] = str(Transmissivity_24_fileName)
xfile.save(sheet_out_file_name)
'''
'''
############################################# Create Outlier maps for PROBA-V #########################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_albedo, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
############################################ NDVI ##################################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers_NDVI')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_ndvi, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
###################################################### postHANTS Albedo ###############################################
for date in Dates:
year = date.year
month = date.month
day = date.day
filename_outliers = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Outliers\Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_original = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_HANTS = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
dest_outliers = gdal.Open(filename_outliers)
dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
if not dest_VIIRS_original == None:
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
if np.nansum(Array_outliers_mask) > 30:
Array_outliers_mask[Array_VIIRS_HANTS == 0] = np.nan
Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan_flatten[~np.isnan(Array_VIIRS_original_mask_nan_flatten)]
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten)]
Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>0]
Array_VIIRS_original_mask_value_cold = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
Array_VIIRS_original_mask_value_hot = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest[Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest==-9999] = np.nan
Array_VIIRS_original_mask_nan_flatten_exc_coldest[Array_VIIRS_original_mask_nan_flatten_exc_coldest==-9999] = np.nan
Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
else:
End_array = Array_VIIRS_HANTS
geo = dest_VIIRS_original.GetGeoTransform()
proj = dest_outliers.GetProjection()
else:
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
End_array = Array_VIIRS_HANTS
dest_test = None
i = 0
while dest_test == None:
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_VIIRS_original2 = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2)
dest_test = gdal.Open(filename_VIIRS_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
import wa.General.data_conversions as DC
name = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_end\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
DC.Save_as_tiff(name, End_array, geo, proj)
################################## All input is now calculated, so preprosessing can start ########################
# Open preprosessing excel the Vegetation_Height sheet
ws_veg = wb_veg['Vegetation_Height']
# Define output name for the LandUse map
dst_FileName = os.path.join(output_folder,'LU_%s.tif' %Var_name)
# Open LU data
LU_dest = gdal.Open(LU_data_FileName)
LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
# Reproject the LAI to the same projection as LU
dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
# Read out the excel file coefficient numbers
Array = np.zeros([ws_veg.max_row-1,4])
for j in ['A','C','D','E']:
j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
for i in range(2,ws_veg.max_row+1):
Value = (ws_veg['%s%s' %(j,i)].value)
Array[i-2, j_number[j]] = Value
# Create maps with the coefficient numbers for the right land cover
coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
for coeff_nmbr in range(0,3):
for Class in range(0,len(Array)):
coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
# Get some dimensions of the projected dataset
band_data = dest1.GetRasterBand(1)
ncol_data = dest1.RasterXSize
nrow_data = dest1.RasterYSize
shape_data=[ncol_data, nrow_data]
# Calculate the vegetation height in the LU projection
Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
# Save the vegetation height in the lU projection in the temporary directory
Veg_Height_proj_FileName = os.path.join(output_folder_temp,'Veg_Height_proj.tif')
save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
# Reproject the Veg_height to the LAI projection
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
# Get some dimensions of the original dataset
band_data = dest.GetRasterBand(1)
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
# Open the Veg_height with the same projection as LAI
Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
Veg_Height[Veg_Height == 0] = np.nan
# Save Vegetation Height in the end folder
dst_FileName = os.path.join(output_folder,'Vegetation_Height_%s.tif' %Var_name)
save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_p.max_row+1):
Value = (ws_p['%s%s' %(j,i)].value)
Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'p-factor_proj.tif')
save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = np.nan
dst_pfactor_FileName = os.path.join(output_folder,'p-factor_%s.tif' %Var_name)
save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_c.max_row+1):
Value = (ws_c['%s%s' %(j,i)].value)
Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'LUE_max_proj.tif')
save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = np.nan
dst_LUEmax_FileName = os.path.join(output_folder,'LUE_max_%s.tif' %Var_name)
save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
############################# delete temporary directory ########################
shutil.rmtree(output_folder_temp)
#################################################################################
'''
# Functions
#################################################################################
def Create_Buffer(Data_In):
'''
This function creates a 3D array which is used to apply the moving window
'''
Buffer_area = 7 # A block of 2 times Buffer_area + 1 will be 1 if there is the pixel in the middle is 1
Data_Out=np.empty((len(Data_In),len(Data_In[1])))
Data_Out[:,:] = Data_In
for ypixel in range(0,Buffer_area + 1):
for xpixel in range(1,Buffer_area + 1):
if ypixel==0:
for xpixel in range(1,Buffer_area + 1):
Data_Out[:,0:-xpixel] += Data_In[:,xpixel:]
Data_Out[:,xpixel:] += Data_In[:,:-xpixel]
for ypixel in range(1,Buffer_area + 1):
Data_Out[ypixel:,:] += Data_In[:-ypixel,:]
Data_Out[0:-ypixel,:] += Data_In[ypixel:,:]
else:
Data_Out[0:-xpixel,ypixel:] += Data_In[xpixel:,:-ypixel]
Data_Out[xpixel:,ypixel:] += Data_In[:-xpixel,:-ypixel]
Data_Out[0:-xpixel,0:-ypixel] += Data_In[xpixel:,ypixel:]
Data_Out[xpixel:,0:-ypixel] += Data_In[:-xpixel,ypixel:]
Data_Out[Data_Out>0.1] = 1
Data_Out[Data_Out<=0.1] = 0
return(Data_Out)
#------------------------------------------------------------------------------
def Get_epsg(g):
    """
    Return the EPSG code of a GDAL dataset's projection.

    g -- opened GDAL dataset (anything exposing GetProjection()).

    Falls back to EPSG 4326 (WGS84) when the projection string cannot
    be retrieved or parsed.
    """
    try:
        # Get info of the dataset that is used for transforming.
        # The WKT typically ends with AUTHORITY["EPSG","<code>"]] so the
        # digits sit between the last 'EPSG","' marker and the closing quote.
        gland_proj = g.GetProjection()
        Projection = gland_proj.split('EPSG","')
        epsg_to = int((str(Projection[-1]).split(']')[0])[0:-1])
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
        # still propagate; anything else means "projection unknown".
        epsg_to = 4326
        print('Was not able to get the projection, so WGS84 is assumed')
    return(epsg_to)
#------------------------------------------------------------------------------
def gap_filling(data, NoDataValue):
    """
    This function fills the no data gaps in a numpy array

    Keyword arguments:
    data -- 2D numpy array
    NoDataValue -- value that marks a gap; may be np.nan (or any NaN)
                   or an ordinary number such as -9999

    Returns the array with every gap replaced by the value of the
    nearest valid pixel (nearest-neighbour interpolation).
    """
    # Build a mask of valid pixels. The old identity test
    # ("NoDataValue is np.nan") only matched the exact np.nan object,
    # so NaNs produced elsewhere (e.g. float('nan')) slipped through;
    # detect any NaN sentinel with isnan instead.
    if isinstance(NoDataValue, float) and np.isnan(NoDataValue):
        mask = ~(np.isnan(data))
    else:
        mask = ~(data==NoDataValue)
    # Pixel coordinate grids covering the whole array
    xx, yy = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))
    # Coordinates and values of the valid pixels only
    xym = np.vstack( (np.ravel(xx[mask]), np.ravel(yy[mask])) ).T
    data0 = np.ravel( data[:,:][mask] )
    # Nearest-neighbour interpolator trained on the valid pixels
    interp0 = scipy.interpolate.NearestNDInterpolator( xym, data0 )
    data_end = interp0(np.ravel(xx), np.ravel(yy)).reshape( xx.shape )
    return (data_end)
#------------------------------------------------------------------------------
if __name__ == '__main__':
    main()
""" Vault interactions """
from __future__ import print_function
import os
import socket
import logging
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import hvac
import yaml
from aomi.helpers import normalize_vault_path
from aomi.util import token_file, appid_file, approle_file
from aomi.validation import sanitize_mount
import aomi.error
import aomi.exceptions
LOG = logging.getLogger(__name__)
def is_aws(data):
    """Heuristic check: does this secret blob look like AWS credentials?

    A blob is treated as AWS-ish when both an access key and a secret
    key are present."""
    required_keys = ('access_key', 'secret_key')
    return all(key in data for key in required_keys)
def grok_seconds(lease):
    """Convert a Vault lease string to seconds.

    Accepts strings ending in 's' (seconds), 'm' (minutes) or
    'h' (hours); returns None for any other suffix."""
    unit_factors = {'s': 1, 'm': 60, 'h': 3600}
    unit = lease[-1:]
    if unit in unit_factors:
        return int(lease[:-1]) * unit_factors[unit]
    return None
def renew_secret(client, creds, opt):
    """Renews a secret. This will occur unless the user has
    specified on the command line that it is not necessary.

    client -- hvac client, extended with a version attribute
    creds  -- secret response containing a 'lease_id'
    opt    -- parsed CLI options; uses reuse_token and lease

    Raises AomiCommand on an unparseable lease string and
    VaultConstraint when Vault will not grant the requested lease
    (after revoking our own token).
    """
    if opt.reuse_token:
        return
    # Lease string like '30s'/'5m'/'2h' -> seconds (None when invalid)
    seconds = grok_seconds(opt.lease)
    if not seconds:
        raise aomi.exceptions.AomiCommand("invalid lease %s" % opt.lease)
    renew = None
    if client.version:
        # Vault <= 0.8.0 needs a raw POST to the renew endpoint because
        # hvac's renew_secret did not pass the increment correctly there.
        v_bits = client.version.split('.')
        if int(v_bits[0]) == 0 and \
           int(v_bits[1]) <= 8 and \
           int(v_bits[2]) <= 0:
            r_obj = {
                'increment': seconds
            }
            r_path = "v1/sys/renew/{0}".format(creds['lease_id'])
            # Pending discussion on https://github.com/ianunruh/hvac/issues/148
            # pylint: disable=protected-access
            renew = client._post(r_path, json=r_obj).json()
    if not renew:
        renew = client.renew_secret(creds['lease_id'], seconds)
    # sometimes it takes a bit for vault to respond
    # if we are within 5s then we are fine
    if not renew or (seconds - renew['lease_duration'] >= 5):
        # Vault would not honor the requested lease: clean up our own
        # token before surfacing the constraint to the caller.
        client.revoke_self_token()
        e_msg = 'Unable to renew with desired lease'
        raise aomi.exceptions.VaultConstraint(e_msg)
def approle_token(vault_client, role_id, secret_id):
    """Authenticate against Vault with an approle and return the
    resulting client token string."""
    response = vault_client.auth_approle(role_id, secret_id)
    auth_body = response.get('auth') if hasattr(response, 'get') else None
    if not auth_body or 'client_token' not in auth_body:
        raise aomi.exceptions.AomiCredentials('invalid approle')
    return auth_body['client_token']
def app_token(vault_client, app_id, user_id):
    """Authenticate against Vault with the (legacy) app-id backend and
    return the resulting client token string."""
    response = vault_client.auth_app_id(app_id, user_id)
    auth_body = response.get('auth') if hasattr(response, 'get') else None
    if not auth_body or 'client_token' not in auth_body:
        raise aomi.exceptions.AomiCredentials('invalid apptoken')
    return auth_body['client_token']
def token_meta(opt):
    """Build the metadata dict attached to freshly created tokens.

    Always records the tool, the CLI operation and the hostname; adds the
    unix user when available, then folds in any user supplied key=value
    pairs from --metadata without overriding the built-in keys."""
    meta = {
        'via': 'aomi',
        'operation': opt.operation,
        'hostname': socket.gethostname()
    }
    if 'USER' in os.environ:
        meta['unix_user'] = os.environ['USER']
    if opt.metadata:
        for chunk in opt.metadata.split(','):
            m_key, m_value = chunk.split('=')
            # Built-in keys win over user supplied ones.
            meta.setdefault(m_key, m_value)
    for m_key, m_value in meta.items():
        LOG.debug("Token metadata %s %s", m_key, m_value)
    return meta
def get_backend(backend, path, backends):
    """Return the mountpoint details for a backend of the given type at
    the given path, or None when no such mount exists."""
    wanted = normalize_vault_path(path)
    for mount_path, details in backends.items():
        if details['type'] != backend:
            continue
        if normalize_vault_path(mount_path) == wanted:
            return details
    return None
def is_mounted(backend, path, backends):
    """Determine whether a backend of a certain type is mounted."""
    found = get_backend(backend, path, backends)
    return found is not None
def wrap_hvac(msg):
    """Error catching Vault API wrapper
    This decorator wraps API interactions with Vault. It will
    catch and return appropriate error output on common
    problems. Do we even need this now that we extend the
    hvac class?

    :param msg: short action description interpolated into the
        permission-denied error message.
    """
    # pylint: disable=missing-docstring
    def wrap_call(func):
        # pylint: disable=missing-docstring
        def func_wrapper(self, vault_client):
            # Only Vault's literal 'permission denied' error is translated
            # into an aomi credentials error; all other request failures
            # propagate unchanged.
            # NOTE(review): assumes the decorated object exposes `.path`
            # -- confirm at decoration sites.
            try:
                return func(self, vault_client)
            except (hvac.exceptions.InvalidRequest,
                    hvac.exceptions.Forbidden) as vault_exception:
                if vault_exception.errors[0] == 'permission denied':
                    emsg = "Permission denied %s from %s" % (msg, self.path)
                    raise aomi.exceptions.AomiCredentials(emsg)
                else:
                    raise
        return func_wrapper
    return wrap_call
class Client(hvac.Client):
    """Our Vault Client Wrapper
    This class will pass the existing hvac bits through. When interacting
    with cubbyhole paths, it will use the non operational token in order
    to preserve access."""
    # dat hvac tho
    # pylint: disable=too-many-arguments

    def __init__(self, _url=None, token=None, _cert=None, _verify=True,
                 _timeout=30, _proxies=None, _allow_redirects=True,
                 _session=None):
        # NOTE(review): the underscore-prefixed parameters mirror the
        # hvac.Client signature but are intentionally ignored here; the
        # connection is always built from VAULT_ADDR / VAULT_SKIP_VERIFY
        # environment variables.
        self.version = None
        self.vault_addr = os.environ.get('VAULT_ADDR')
        if not self.vault_addr:
            raise aomi.exceptions.AomiError('VAULT_ADDR is undefined or empty')
        if not self.vault_addr.startswith("http"):
            raise aomi.exceptions.AomiError('VAULT_ADDR must be a URL')
        ssl_verify = True
        if 'VAULT_SKIP_VERIFY' in os.environ:
            if os.environ['VAULT_SKIP_VERIFY'] == '1':
                # User explicitly opted out of TLS validation; also silence
                # urllib3's InsecureRequestWarning noise.
                import urllib3
                urllib3.disable_warnings()
                ssl_verify = False
        self.initial_token = None
        self.operational_token = None
        # Retry transient HTTP failures: up to 5 attempts with backoff.
        session = requests.Session()
        retries = Retry(total=5,
                        backoff_factor=0.5)
        adapter = HTTPAdapter(max_retries=retries)
        session.mount('https://', adapter)
        session.mount('http://', adapter)
        super(Client, self).__init__(url=self.vault_addr,
                                     verify=ssl_verify,
                                     session=session)

    def server_version(self):
        """Attempts to determine the version of Vault that a
        server is running. Some actions will change on older
        Vault deployments.

        Returns the version string or None if the health endpoint
        did not report one.
        """
        health_url = "%s/v1/sys/health" % self.vault_addr
        resp = self.session.request('get', health_url, **self._kwargs)
        # NOTE(review): 429 is accepted alongside 200 -- presumably the
        # non-200 status Vault returns for standby nodes; confirm.
        if resp.status_code == 200 or resp.status_code == 429:
            blob = resp.json()
            if 'version' in blob:
                return blob['version']
        else:
            raise aomi.exceptions.VaultProblem('Health check failed')
        return None

    def connect(self, opt):
        """This sets up the tokens we expect to see in a way
        that hvac also expects.

        Authenticates with an initial token from init_token(), then either
        reuses it (--reuse-token) or mints a short-lived operational token,
        leaving the client authenticated as the operational token.
        """
        if not self._kwargs['verify']:
            LOG.warning('Skipping SSL Validation!')
        self.version = self.server_version()
        self.token = self.init_token()
        my_token = self.lookup_token()
        if not my_token or 'data' not in my_token:
            raise aomi.exceptions.AomiCredentials('initial token')
        display_name = my_token['data']['display_name']
        vsn_string = ""
        if self.version:
            vsn_string = ", v%s" % self.version
        else:
            # NOTE(review): typo 'deterine' in this log message.
            LOG.warning("Unable to deterine Vault version. Not all "
                        "functionality is supported")
        LOG.info("Connected to %s as %s%s",
                 self._url,
                 display_name,
                 vsn_string)
        if opt.reuse_token:
            LOG.debug("Not creating operational token")
            self.initial_token = self.token
            self.operational_token = self.token
        else:
            self.initial_token = self.token
            self.operational_token = self.op_token(display_name, opt)
        if not self.is_authenticated():
            raise aomi.exceptions.AomiCredentials('operational token')
        self.token = self.operational_token
        return self

    def init_token(self):
        """Generate our first token based on workstation configuration.

        Credential sources are consulted in priority order:
        VAULT_ROLE_ID/VAULT_SECRET_ID, VAULT_TOKEN,
        VAULT_APP_ID/VAULT_USER_ID, then the on-disk approle, token
        and app-id files. Raises AomiCredentials when none apply.
        """
        app_filename = appid_file()
        token_filename = token_file()
        approle_filename = approle_file()
        token = None
        if 'VAULT_ROLE_ID' in os.environ and \
           'VAULT_SECRET_ID' in os.environ and \
           os.environ['VAULT_ROLE_ID'] and os.environ['VAULT_SECRET_ID']:
            token = approle_token(self,
                                  os.environ['VAULT_ROLE_ID'],
                                  os.environ['VAULT_SECRET_ID'])
            LOG.debug("Token derived from VAULT_ROLE_ID and VAULT_SECRET_ID")
        elif 'VAULT_TOKEN' in os.environ and os.environ['VAULT_TOKEN']:
            LOG.debug('Token derived from VAULT_TOKEN environment variable')
            token = os.environ['VAULT_TOKEN'].strip()
        elif 'VAULT_USER_ID' in os.environ and \
             'VAULT_APP_ID' in os.environ and \
             os.environ['VAULT_USER_ID'] and os.environ['VAULT_APP_ID']:
            LOG.debug("Token derived from VAULT_APP_ID and VAULT_USER_ID")
            token = app_token(self,
                              os.environ['VAULT_APP_ID'].strip(),
                              os.environ['VAULT_USER_ID'].strip())
        elif approle_filename:
            creds = yaml.safe_load(open(approle_filename).read().strip())
            if 'role_id' in creds and 'secret_id' in creds:
                LOG.debug("Token derived from approle file")
                token = approle_token(self,
                                      creds['role_id'],
                                      creds['secret_id'])
        elif token_filename:
            LOG.debug("Token derived from %s", token_filename)
            try:
                token = open(token_filename, 'r').read().strip()
            except IOError as os_exception:
                # errno 21 == EISDIR: the "token file" is a directory.
                if os_exception.errno == 21:
                    raise aomi.exceptions.AomiFile('Bad Vault token file')
                raise
        elif app_filename:
            token = yaml.safe_load(open(app_filename).read().strip())
            if 'app_id' in token and 'user_id' in token:
                LOG.debug("Token derived from %s", app_filename)
                token = app_token(self,
                                  token['app_id'],
                                  token['user_id'])
        else:
            raise aomi.exceptions.AomiCredentials('unknown method')
        return token

    def op_token(self, display_name, opt):
        """Return a properly annotated token for our use. This
        token will be revoked at the end of the session. The token
        will have some decent amounts of metadata tho."""
        args = {
            'lease': opt.lease,
            'display_name': display_name,
            'meta': token_meta(opt)
        }
        try:
            token = self.create_token(**args)
        except (hvac.exceptions.InvalidRequest,
                hvac.exceptions.Forbidden) as vault_exception:
            if vault_exception.errors[0] == 'permission denied':
                emsg = "Permission denied creating operational token"
                raise aomi.exceptions.AomiCredentials(emsg)
            else:
                raise
        LOG.debug("Created operational token with lease of %s", opt.lease)
        return token['auth']['client_token']

    def read(self, path, wrap_ttl=None):
        """Wrap the hvac read call, using the right token for
        cubbyhole interactions."""
        path = sanitize_mount(path)
        # cubbyhole secrets are bound to the token that wrote them, so we
        # temporarily switch back to the initial token for the call.
        if path.startswith('cubbyhole'):
            self.token = self.initial_token
            val = super(Client, self).read(path, wrap_ttl)
            self.token = self.operational_token
            return val
        return super(Client, self).read(path, wrap_ttl)

    def write(self, path, wrap_ttl=None, **kwargs):
        """Wrap the hvac write call, using the right token for
        cubbyhole interactions."""
        path = sanitize_mount(path)
        val = None
        if path.startswith('cubbyhole'):
            self.token = self.initial_token
            val = super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
            self.token = self.operational_token
        else:
            # NOTE(review): the non-cubbyhole branch discards hvac's return
            # value (val stays None), unlike read(); confirm this is intended.
            super(Client, self).write(path, wrap_ttl=wrap_ttl, **kwargs)
        return val

    def delete(self, path):
        """Wrap the hvac delete call, using the right token for
        cubbyhole interactions."""
        path = sanitize_mount(path)
        val = None
        if path.startswith('cubbyhole'):
            self.token = self.initial_token
            val = super(Client, self).delete(path)
            self.token = self.operational_token
        else:
            super(Client, self).delete(path)
        return val
| otakup0pe/aomi | aomi/vault.py | Python | mit | 13,090 |
"""Align multiple structures with TMalign."""
from __future__ import division
import itertools
import logging
import math
import os
import subprocess
import tempfile
from cStringIO import StringIO
import networkx
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from biofrills import alnutils
def align_structs(pdb_fnames, seed_fnames=None):
    """Align multiple PDB structures using TM-align.

    Pipeline: (1) run TM-align on every structure pair, (2) keep only the
    minimum-spanning subset of pairwise alignments, (3) merge those pairs
    into one multiple alignment via MAFFT's --seed mechanism.

    Returns a list of aligned SeqRecords.
    """
    # 1. Align all-v-all structure pairs with TM-align
    allpairs = []
    for idx, ref_pdbfn in enumerate(pdb_fnames):
        # File names are interpolated into command lines; spaces would break.
        assert ' ' not in ref_pdbfn
        for eqv_pdbfn in pdb_fnames[idx+1:]:
            assert eqv_pdbfn != ref_pdbfn
            logging.info("Aligning %s to %s", eqv_pdbfn, ref_pdbfn)
            try:
                tm_output = subprocess.check_output(['TMalign',
                                                     ref_pdbfn, eqv_pdbfn])
            except OSError:
                # TMalign binary missing or not executable; report which
                # input files (if any) are also missing before re-raising.
                logging.warning("Failed command: TMalign %s %s",
                                ref_pdbfn, eqv_pdbfn)
                for fname in (ref_pdbfn, eqv_pdbfn):
                    if not os.path.isfile(fname):
                        logging.warning("Missing file: %s", fname)
                raise
            except subprocess.CalledProcessError, exc:
                raise RuntimeError("TMalign failed (returned %s):\n%s"
                                   % (exc.returncode, exc.output))
            tm_seqpair = read_tmalign_as_seqrec_pair(tm_output,
                                                     ref_pdbfn, eqv_pdbfn)
            allpairs.append(tm_seqpair)
    # In case of 2 structs, no need to combine alignments -- we're done
    if len(allpairs) == 1:
        recs = allpairs[0][:2]
        return alnutils.remove_empty_cols(recs)
    # 2. Resolve MST pairs & write seed tempfiles
    tmp_seed_fnames = []
    for seedpair in mst_pairs(allpairs):
        # fd, seedfn = tempfile.mkstemp(text=True)
        # SeqIO.write(seedpair, seedfn, 'fasta')
        # SeqIO.write(seedpair, os.fdopen(fd), 'fasta')
        with tempfile.NamedTemporaryFile('w+', delete=False) as handle:
            SeqIO.write(seedpair, handle, 'fasta')
            tmp_seed_fnames.append(handle.name)
    # 3. Use MAFFT to combine TMalign'd pairs into a multiple alignment;
    seq_fd, seq_fname = tempfile.mkstemp(text=True)
    # Create a blank file to appease MAFFT
    os.write(seq_fd, '')
    os.close(seq_fd)
    mafft_output = subprocess.check_output(['mafft',
                                            '--quiet', '--amino', '--localpair',
                                            '--maxiterate', '1000']
                                           + list(itertools.chain(*[('--seed', sfn)
                                                                    for sfn in (seed_fnames or []) + tmp_seed_fnames]))
                                           + [seq_fname])
    # Clean up
    os.remove(seq_fname)
    for sfn in tmp_seed_fnames:
        os.remove(sfn)
    # 4. Emit the aligned sequences
    recs = SeqIO.parse(StringIO(mafft_output), 'fasta')
    recs = clean_and_dedupe_seqs(recs)
    recs = alnutils.remove_empty_cols(recs)
    recs = purge_seqs(recs)
    return list(recs)
def read_tmalign_as_seqrec_pair(tm_output, ref_id, eqv_id):
    """Create a pair of SeqRecords from TMalign output.

    Parses the raw stdout of a pairwise TMalign run and returns a 3-tuple
    (ref_record, eqv_record, tmscore): the two gapped aligned sequences
    plus the mean of the reported TM-scores, which is also embedded in
    each record's description as 'TMalign TM-score=<float>'.
    """
    lines = tm_output.splitlines()
    # Extract the TM-score (measure of structure similarity)
    # Take the mean of the (two) given TM-scores -- not sure which is reference
    tmscores = []
    for line in lines:
        if line.startswith('TM-score'):
            # TMalign v. 2012/05/07 or earlier
            tmscores.append(float(line.split(None, 2)[1]))
        elif 'TM-score=' in line:
            # TMalign v. 2013/05/11 or so
            tokens = line.split()
            for token in tokens:
                if token.startswith('TM-score='):
                    # Newer output may append a comma to the value.
                    _key, _val = token.split('=')
                    tmscores.append(float(_val.rstrip(',')))
                    break
    tmscore = math.fsum(tmscores) / len(tmscores)
    # Extract the sequence alignment from the last five lines of output.
    lastlines = lines[-5:]
    assert lastlines[0].startswith('(":"')  # (":" denotes the residues pairs
    assert not lastlines[-1].strip()
    refseq, eqvseq = lastlines[1].strip(), lastlines[3].strip()
    return (SeqRecord(Seq(refseq), id=ref_id,
                      description="TMalign TM-score=%f" % tmscore),
            SeqRecord(Seq(eqvseq), id=eqv_id,
                      description="TMalign TM-score=%f" % tmscore),
            tmscore)
def mst_pairs(pairs):
    """Given all pairwise distances, determine the minimal spanning subset.

    Each triple (rec_a, rec_b, tmscore) becomes an undirected edge weighted
    by 1/tmscore, so higher-scoring (more similar) pairs are cheaper and
    preferentially retained by the minimum spanning tree.

    Input: iterable of (SeqRecord, SeqRecord, distance)
    Output: list of (SeqRecord, SeqRecord)
    """
    similarity_graph = networkx.Graph()
    for rec_a, rec_b, tmscore in pairs:
        similarity_graph.add_edge(rec_a, rec_b, weight=1.0 / tmscore)
    spanning_edges = networkx.minimum_spanning_edges(similarity_graph,
                                                     data=False)
    return list(spanning_edges)
def tmscore_from_description(text):
    """Extract the TM-score from a SeqRecord description string.

    Scans whitespace-delimited tokens for one of the form
    'TM-score=<float>' (as written by read_tmalign_as_seqrec_pair) and
    returns the score as a float. Returns None when no such token exists.

    Fixes vs. the previous version: requires the '=' in the prefix so a
    bare 'TM-score' token cannot trigger an IndexError, and strips a
    trailing comma, matching the raw-TMalign-output parser above.
    """
    for token in text.split():
        if token.startswith('TM-score='):
            return float(token.split('=', 1)[1].rstrip(','))
    return None
def clean_and_dedupe_seqs(records, best_score=False):
    """Strip the MAFFT '_seed_' ID prefix and drop duplicate records.

    With best_score=False (default), records are considered duplicates
    only when both ID and sequence match. With best_score=True, at most
    one record per ID survives: the one whose description carries the
    highest TM-score.
    """
    seed_prefix = '_seed_'
    seen = {} if best_score else set()
    for rec in records:
        # Remove the seed marker so IDs match the original PDB names.
        if rec.id.startswith(seed_prefix):
            rec.id = rec.id[len(seed_prefix):]
            rec.name = rec.id
        if best_score:
            score = tmscore_from_description(rec.description)
            previous = seen.get(rec.id)
            if previous is not None and previous >= score:
                # A better (or equal) alignment of this PDB came earlier.
                continue
            seen[rec.id] = score
        else:
            fingerprint = (rec.id, str(rec.seq))
            if fingerprint in seen:
                continue
            seen.add(fingerprint)
        yield rec
def purge_seqs(records):
    """Drop duplicated records by identical sequence, keeping first seen."""
    emitted = set()
    for record in records:
        key = str(record.seq)
        if key in emitted:
            continue
        emitted.add(key)
        yield record
| etal/fammer | fammerlib/tmalign.py | Python | bsd-2-clause | 6,484 |
# -*- coding: utf-8 -*-
class AttributeDict(dict):  # pylint: disable=too-many-instance-attributes
    """A dictionary whose keys are also accessible as attributes.

    Values are stored in the underlying `dict`, but ``attrdict.key``
    returns ``attrdict['key']`` and vice versa. Nested mappings are
    recursively converted to `AttributeDict` instances.

    Attribute-style access raises an `AttributeError` for missing keys,
    while subscript access keeps the usual `KeyError` behaviour.
    """

    def __init__(self, dictionary=None):
        """Recursively turn the `dict` and all its nested dictionaries into `AttributeDict` instance.

        :param dictionary: optional mapping used to seed the instance.
        """
        super().__init__()
        # Imported locally (mirroring `deepcopy` in __deepcopy__ below).
        # Fix: `Mapping` was previously referenced without being imported
        # anywhere in this module, raising NameError for nested dicts.
        from collections.abc import Mapping
        if dictionary is None:
            dictionary = {}
        for key, value in dictionary.items():
            if isinstance(value, Mapping):
                self[key] = AttributeDict(value)
            else:
                self[key] = value

    def __repr__(self):
        """Representation of the object."""
        return f'{self.__class__.__name__}({dict.__repr__(self)})'

    def __getattr__(self, attr):
        """Read a key as an attribute.

        :raises AttributeError: if the attribute does not correspond to an existing key.
        """
        try:
            return self[attr]
        except KeyError:
            errmsg = f"'{self.__class__.__name__}' object has no attribute '{attr}'"
            raise AttributeError(errmsg)

    def __setattr__(self, attr, value):
        """Set a key as an attribute."""
        try:
            self[attr] = value
        except KeyError:
            raise AttributeError(
                f"AttributeError: '{attr}' is not a valid attribute of the object '{self.__class__.__name__}'")

    def __delattr__(self, attr):
        """Delete a key as an attribute.

        :raises AttributeError: if the attribute does not correspond to an existing key.
        """
        try:
            del self[attr]
        except KeyError:
            errmsg = f"'{self.__class__.__name__}' object has no attribute '{attr}'"
            raise AttributeError(errmsg)

    def __deepcopy__(self, memo=None):
        """Deep copy: copy the plain-dict contents, then re-wrap them."""
        from copy import deepcopy
        if memo is None:
            memo = {}
        retval = deepcopy(dict(self))
        return self.__class__(retval)

    def __getstate__(self):
        """Needed for pickling this class."""
        return self.__dict__.copy()

    def __setstate__(self, dictionary):
        """Needed for pickling this class."""
        self.__dict__.update(dictionary)

    def __dir__(self):
        # Expose the keys so attribute tab-completion works.
        return self.keys()
| lorisercole/thermocepstrum | sportran/utils/attributedict.py | Python | gpl-3.0 | 2,592 |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint checks used by all the linters."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import os
import re
import python_utils
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import common
from .. import concurrent_task_utils
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
'*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
'scripts/linters/test_files/*',
'core/tests/release_sources/tmp_unzip.tar.gz',
'core/templates/combined-tests.spec.ts',
'core/templates/css/oppia-material.css',
'%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
GENERATED_FILE_PATHS = (
'extensions/interactions/LogicProof/static/js/generatedDefaultData.ts',
'extensions/interactions/LogicProof/static/js/generatedParser.ts',
'core/templates/expressions/parser.js')
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
# Strings that must be present in certain files, the message shown when one
# is missing, and files excluded from the check. Fix: the message is built
# by implicit string concatenation across two lines, and the first half was
# missing its trailing space, yielding "constants.tsto true".
REQUIRED_STRINGS_CONSTANTS = {
    'DEV_MODE: true': {
        'message': 'Please set the DEV_MODE variable in constants.ts '
                   'to true before committing.',
        'excluded_files': ()
    }
}
BAD_PATTERNS = {
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
'message': 'Please assign TODO comments to a user '
'in the format TODO(username): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_JS_AND_TS_REGEXP = [
{
'regexp': re.compile(r'\b(browser.explore)\('),
'message': 'In tests, please do not use browser.explore().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.pause)\('),
'message': 'In tests, please do not use browser.pause().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.sleep)\('),
'message': 'In tests, please do not use browser.sleep().',
'excluded_files': (
# TODO(#7622): Remove the file from the excluded list. Remove the
# TODO in core/tests/protractor_desktop/embedding.js pointing to the
# same issue. The following was placed due to a necessary sleep as
# a temporary measure to keep the embedding tests from failing.
'core/tests/protractor_desktop/embedding.js',
),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(browser.waitForAngular)\('),
'message': 'In tests, please do not use browser.waitForAngular().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'bypass'),
'message': 'The use of the word "bypass" is not allowed, ' +
'particularly with regards to bypassSecurityTrustHTML() ' +
'and similar functions in Angular.',
'excluded_files': (
warranted_angular_security_bypasses
.EXCLUDED_BYPASS_SECURITY_TRUST_FILES),
'excluded_dirs': (
warranted_angular_security_bypasses
.EXCLUDED_BYPASS_SECURITY_TRUST_DIRECTORIES)
},
{
'regexp': re.compile(r'\b(ddescribe|fdescribe)\('),
'message': 'In tests, please use \'describe\' instead of \'ddescribe\''
'or \'fdescribe\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(iit|fit)\('),
'message': 'In tests, please use \'it\' instead of \'iit\' or \'fit\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\b(beforeEach\(inject\(function)\('),
'message': 'In tests, please use \'angular.mock.inject\' instead of '
'\'inject\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'templateUrl: \''),
'message': 'The directives must be directly referenced.',
'excluded_files': (
'core/templates/pages/exploration-player-page/'
'FeedbackPopupDirective.js',
'.component.ts'
),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/dependencies/',
'extensions/value_generators/',
'extensions/visualizations/')
},
{
'regexp': re.compile(r'toThrow[(]'),
'message': 'Please use \'toThrowError\' instead of '
'\'toThrow\'',
'excluded_files': (
# Note to developers: In the excluded_files below,
# we use custom errors which cannot be caught by regex.
# The Logic Proof interaction which uses these custom errors
# will be deprecated soon (see #9198).
'extensions/interactions/LogicProof/static/js/student.spec.ts',
'extensions/interactions/LogicProof/static/js/complete.spec.ts',
'extensions/interactions/LogicProof/static/js/teacher.spec.ts'),
'excluded_dirs': ()
},
{
'regexp': re.compile(
r'(?!catch\s(\n|.)*throw\s\w+;\n.*})'
r'throw\s\b(\bError|\bTypeError|\bRangeError'
r'\bSyntaxError|\bDimensionError)\('),
'message': 'Please use \'throw new\' instead of \'throw\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(
r'(?!catch\s(\n|.)*throw\s\w+;\n.*})throw\s\'.*\';'),
'message': 'Please use '
'\'throw new Error\' instead of \'throw\'',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'require\(.*\.\..*\);'),
'message': 'Please, don\'t use relative imports in require().',
'excluded_files': (),
'excluded_dirs': ('core/tests/',)
},
{
'regexp': re.compile(r'innerHTML'),
'message': 'Please do not use innerHTML property.',
'excluded_files': (
'core/templates/Polyfills.ts',
'core/templates/filters/translate.pipe.spec.ts',
'core/templates/components/ck-editor-helpers/' +
'ck-editor-copy-content-service.spec.ts',
'core/templates/tests/unit-test-utils.ts'),
'excluded_dirs': ('core/tests/',)
},
{
'regexp': re.compile(
r'eslint-(disable|enable)(-next-line)? camelcase'),
'message': (
'Please do not use eslint disable for camelcase. '
'If you are using this statement to define properties '
'in an interface for a backend dict. Wrap the property '
'name in single quotes instead.'),
'excluded_files': (
'typings/guppy-defs-b5055b963fdbea5c6c1e92dbf58fdaf3ea0cd8ba.d.ts',
'core/templates/services/UpgradedServices.ts'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'no-explicit-any'),
'message': (
'Please do not define "any" types. You can refer '
'https://github.com/oppia/oppia/wiki/Guide-on-defining-types '
'if you\'re having trouble declaring types.'),
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$broadcast'),
'message': (
'Please do not use $broadcast/$on for propagating events. '
'Use @Input/@Output instead.'),
'excluded_files': (
'core/templates/pages/exploration-editor-page/translation-tab/'
'audio-translation-bar/audio-translation-bar.directive.spec.ts',
'core/templates/pages/library-page/search-bar/'
'search-bar.component.spec.ts'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'import \{.*\} from \'lodash\''),
'message': (
'Please do not use "import { someFunction } from \'lodash\'". '
'Use "import someFunction from \'lodash/someFunction\'" instead.'),
'excluded_files': (),
'excluded_dirs': ()
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file should contain a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import unicode_literals'),
'message': 'Please ensure this file should contain unicode_literals '
'future import.',
'included_types': ('.py'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file should contain a file '
'overview i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\s+style\s*=\s*'),
'message': 'Please do not use inline styling.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'__author__'),
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'datetime.datetime.now\(\)'),
'message': 'Please use datetime.datetime.utcnow() instead of '
'datetime.datetime.now().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'\sprint\('),
'message': 'Please use python_utils.PRINT().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'self.assertEquals\('),
'message': 'Please do not use self.assertEquals method. ' +
'This method has been deprecated. Instead use ' +
'self.assertEqual method.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'with open\(|= open\('),
'message': 'Please use python_utils.open_file() instead of open().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'StringIO'),
'message': 'Please use python_utils.string_io() instead of ' +
'import StringIO.',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*quote\('),
'message': 'Please use python_utils.url_quote().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*unquote_plus\('),
'message': 'Please use python_utils.url_unquote_plus().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlencode\('),
'message': 'Please use python_utils.url_encode().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlretrieve\('),
'message': 'Please use python_utils.url_retrieve().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
'message': 'Please use python_utils.url_open().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlsplit'),
'message': 'Please use python_utils.url_split().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlparse'),
'message': 'Please use python_utils.url_parse().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urlunsplit'),
'message': 'Please use python_utils.url_unsplit().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'parse_qs'),
'message': 'Please use python_utils.parse_query_string().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wunquote\('),
'message': 'Please use python_utils.urllib_unquote().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urljoin'),
'message': 'Please use python_utils.url_join().',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*Request\('),
'message': 'Please use python_utils.url_request().',
'excluded_files': ('python_utils.py', 'python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w]input\('),
'message': 'Please use python_utils.INPUT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'[^.|\w|\s]map\('),
'message': 'Please use python_utils.MAP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wnext\('),
'message': 'Please use python_utils.NEXT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'object\):'),
'message': 'Please use python_utils.OBJECT.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wrange\('),
'message': 'Please use python_utils.RANGE.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wround\('),
'message': 'Please use python_utils.ROUND.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wstr\('),
'message': (
'Please try to use python_utils.convert_to_bytes() for the strings '
'used in webapp2\'s built-in methods or for strings used directly '
'in NDB datastore models. If you need to cast ints/floats to '
'strings, please use python_utils.UNICODE() instead.'),
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\Wzip\('),
'message': 'Please use python_utils.ZIP.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'basestring'),
'message': 'Please use python_utils.BASESTRING.',
'excluded_files': ('python_utils.py',),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'__metaclass__'),
'message': 'Please use python_utils.with_metaclass().',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iteritems'),
'message': 'Please use items() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'itervalues'),
'message': 'Please use values() instead.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'iterkeys'),
'message': 'Please use keys() instead.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_MAP = {
'.js': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.ts': BAD_PATTERNS_JS_AND_TS_REGEXP,
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
    """Checks if file is excluded from the bad patterns check.

    Args:
        pattern: str. The pattern to be checked against.
        filepath: str. Path of the file.

    Returns:
        bool. Whether to exclude the given file from this
        particular pattern check.
    """
    pattern_config = BAD_PATTERNS[pattern]
    # Exact-path exclusions take care of individual files; prefix
    # exclusions take care of whole directories.
    if filepath in pattern_config['excluded_files']:
        return True
    return any(
        filepath.startswith(excluded_dir)
        for excluded_dir in pattern_config['excluded_dirs'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
    """Detects whether the given pattern is present in the file.

    The original implementation kept a separate match counter and had two
    identical ``return`` statements (one nested inside the non-excluded
    branch, one at the end); this version removes the redundancy while
    preserving the exact return values.

    Args:
        filepath: str. Path of the file.
        file_content: list(str). Lines of the file, each possibly ending
            with a trailing newline.
        pattern: dict. Details for the pattern to be checked, with keys:
            regexp: Pattern. Compiled regular expression to match.
            message: str. Message to show if the pattern matches.
            excluded_files: tuple(str). File suffixes excluded from
                matching.
            excluded_dirs: tuple(str). Directory prefixes excluded from
                matching.

    Returns:
        tuple(bool, list(str)). A 2-tuple whose first element is a bool
        which is True if the bad pattern is found else False, and whose
        second element is a list of failure messages.
    """
    error_messages = []
    if (any(
            filepath.startswith(excluded_dir)
            for excluded_dir in pattern['excluded_dirs'])
            or any(
                filepath.endswith(excluded_file)
                for excluded_file in pattern['excluded_files'])):
        # Excluded files/dirs never fail this check.
        return False, error_messages

    regexp = pattern['regexp']
    for line_num, line in enumerate(file_content, 1):
        stripped_line = line[:-1] if line.endswith('\n') else line
        # Lines carrying this marker are explicitly whitelisted.
        if stripped_line.endswith('disable-bad-pattern-check'):
            continue
        if regexp.search(stripped_line):
            error_messages.append('%s --> Line %s: %s' % (
                filepath, line_num, pattern['message']))
    return bool(error_messages), error_messages
def check_file_type_specific_bad_pattern(filepath, content):
    """Check the file content based on the file's extension.

    The previous docstring documented a 2-value return while the function
    actually returns a 3-tuple; it also recomputed ``failed`` from the last
    loop iteration before overriding it. Both are cleaned up here without
    changing the returned values.

    Args:
        filepath: str. Path of the file.
        content: list(str). Lines of the file.

    Returns:
        tuple(bool, int, list(str)). A 3-tuple of:
            failed: bool. True if any bad pattern matched, else False.
            total_error_count: int. The number of patterns that matched.
            error_messages: list(str). The accumulated failure messages.
    """
    error_messages = []
    total_error_count = 0
    _, extension = os.path.splitext(filepath)
    patterns = BAD_PATTERNS_MAP.get(extension)
    if patterns:
        for regexp in patterns:
            pattern_failed, pattern_error_messages = (
                check_bad_pattern_in_file(filepath, content, regexp))
            error_messages.extend(pattern_error_messages)
            if pattern_failed:
                total_error_count += 1
    # A single matched pattern is enough to mark the whole check failed.
    failed = bool(total_error_count)
    return failed, total_error_count, error_messages
class GeneralPurposeLinter(python_utils.OBJECT):
    """Manages all the common linting functions. As an abstract base class, this
    is not intended to be used directly.
    """

    def __init__(self, files_to_lint, file_cache):
        """Constructs a GeneralPurposeLinter object.

        Args:
            files_to_lint: list(str). A list of filepaths to lint.
            file_cache: object(FileCache). Provides thread-safe access to cached
                file content.
        """
        # Set path for node.
        # The path for node is set explicitly, since otherwise the lint
        # tests fail on CircleCI due to the TypeScript files not being
        # compilable.
        os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
        self.files_to_lint = files_to_lint
        self.file_cache = file_cache

    @property
    def all_filepaths(self):
        """Returns all file paths."""
        return self.files_to_lint

    def _check_for_mandatory_pattern_in_file(
            self, pattern_list, filepath, failed):
        """Checks for a given mandatory pattern in a file.

        Args:
            pattern_list: list(dict). The list of the mandatory patterns list to
                be checked for in the file.
            filepath: str. The path to the file to be linted.
            failed: bool. Status of failure of the check.

        Returns:
            tuple(bool, list(str)). The (possibly updated) failure status of
            the check, and the list of failure messages for patterns that
            were not found in the file.
        """
        # This boolean list keeps track of the regex matches
        # found in the file.
        pattern_found_list = []
        error_messages = []
        file_content = self.file_cache.readlines(filepath)
        for index, regexp_to_check in enumerate(
                pattern_list):
            if (any([filepath.endswith(
                    allowed_type) for allowed_type in (
                        regexp_to_check['included_types'])]) and (
                            not any([
                                filepath.endswith(
                                    pattern) for pattern in (
                                        regexp_to_check[
                                            'excluded_files'] +
                                        regexp_to_check[
                                            'excluded_dirs'])]))):
                # Assume the pattern is missing until a matching line is
                # found; the index is popped again on the first match.
                pattern_found_list.append(index)
                for line in file_content:
                    if regexp_to_check['regexp'].search(line):
                        pattern_found_list.pop()
                        break
        if pattern_found_list:
            failed = True
            for pattern_found in pattern_found_list:
                error_message = ('%s --> %s' % (
                    filepath,
                    pattern_list[pattern_found]['message']))
                error_messages.append(error_message)
        return failed, error_messages

    def check_mandatory_patterns(self):
        """This function checks that all files contain the mandatory
        patterns.
        """
        name = 'Mandatory pattern'
        error_messages = []
        failed = False
        sets_of_patterns_to_match = [
            MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
        for filepath in self.all_filepaths:
            for pattern_list in sets_of_patterns_to_match:
                failed, mandatory_error_messages = (
                    self._check_for_mandatory_pattern_in_file(
                        pattern_list, filepath, failed))
                error_messages.extend(mandatory_error_messages)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_bad_patterns(self):
        """This function is used for detecting bad patterns."""
        name = 'Bad pattern'
        total_files_checked = 0
        total_error_count = 0
        error_messages = []
        # The linter's own sources necessarily contain the bad patterns
        # (as literals), so they are exempt from this check.
        all_filepaths = [
            filepath for filepath in self.all_filepaths if not (
                filepath.endswith('general_purpose_linter.py') or (
                    filepath.endswith('general_purpose_linter_test.py')))]
        failed = False
        for filepath in all_filepaths:
            file_content = self.file_cache.readlines(filepath)
            total_files_checked += 1
            # Plain substring checks (BAD_PATTERNS keys are literal strings).
            for pattern in BAD_PATTERNS:
                if is_filepath_excluded_for_bad_patterns_check(
                        pattern, filepath):
                    continue
                for line_num, line in enumerate(file_content):
                    if pattern in line:
                        failed = True
                        error_message = ('%s --> Line %s: %s' % (
                            filepath, line_num + 1,
                            BAD_PATTERNS[pattern]['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
            # Regex checks that apply to every file type.
            for regexp in BAD_PATTERNS_REGEXP:
                bad_pattern_check_failed, bad_pattern_error_messages = (
                    check_bad_pattern_in_file(
                        filepath, file_content, regexp))
                if bad_pattern_check_failed:
                    error_messages.extend(bad_pattern_error_messages)
                    total_error_count += 1
            # Regex checks keyed on the file extension.
            (
                file_type_specific_bad_pattern_failed,
                temp_count, bad_pattern_error_messages) = (
                    check_file_type_specific_bad_pattern(
                        filepath, file_content))
            # NOTE(review): bad_pattern_check_failed here is the value left
            # over from the last iteration of the loop above; this relies on
            # BAD_PATTERNS_REGEXP being non-empty — confirm.
            failed = (
                failed or file_type_specific_bad_pattern_failed or
                bad_pattern_check_failed)
            total_error_count += temp_count
            error_messages.extend(bad_pattern_error_messages)
            if filepath == 'constants.ts':
                for pattern in REQUIRED_STRINGS_CONSTANTS:
                    if pattern not in file_content:
                        failed = True
                        error_message = ('%s --> %s' % (
                            filepath,
                            REQUIRED_STRINGS_CONSTANTS[pattern]['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def check_newline_at_eof(self):
        """This function is used to detect newline at the end of file."""
        name = 'Newline at EOF'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        for filepath in files_to_lint:
            file_content = self.file_cache.readlines(filepath)
            file_length = len(file_content)
            # readlines() keeps the trailing '\n' on each element, so a
            # well-formed file's last line matches "content + newline".
            if (
                    file_length >= 1 and
                    not re.search(r'[^\n]\n', file_content[-1])):
                error_message = (
                    '%s --> There should be a single newline at the '
                    'end of file.' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)

    def perform_all_lint_checks(self):
        """Perform all the lint checks and returns the messages returned by all
        the checks.

        Returns:
            list(TaskResult). A list of TaskResult objects representing the
            results of the lint checks.
        """
        if not self.all_filepaths:
            return [
                concurrent_task_utils.TaskResult(
                    'General purpose lint', False, [],
                    ['There are no files to be checked.'])]
        task_results = [
            self.check_mandatory_patterns(), self.check_bad_patterns(),
            self.check_newline_at_eof()]
        return task_results
def get_linters(files_to_lint, file_cache):
    """Creates GeneralPurposeLinter object and returns it.

    Args:
        files_to_lint: list(str). A list of filepaths to lint.
        file_cache: object(FileCache). Provides thread-safe access to cached
            file content.

    Returns:
        tuple(GeneralPurposeLinter, None). A 2-tuple of custom and third_party
        linter objects.
    """
    # There is no third-party linter for these checks, hence the None slot.
    return GeneralPurposeLinter(files_to_lint, file_cache), None
| prasanna08/oppia | scripts/linters/general_purpose_linter.py | Python | apache-2.0 | 31,872 |
import os, re, sys, time
import MaKaC.conference as conference
import MaKaC.common.indexes as indexes
from db_classes import *
from datetime import datetime
from MaKaC.webinterface import displayMgr
from MaKaC.user import LoginInfo, Group, GroupHolder, Avatar, AvatarHolder
from MaKaC.conference import ConferenceHolder,CategoryManager
def date2asc(date):
    """Return *date* formatted as 'YYYY-MM-DD', or '' when date is falsy.

    Fixes a NameError in the original, which called string.zfill() without
    ever importing the `string` module; zero-padded %-formatting produces
    the same two-digit month/day.
    """
    if not date:
        return ""
    return "%d-%02d-%02d" % (date.year, date.month, date.day)
# --- One-off recovery script body (Python 2 / legacy Indico "MaKaC") ---
# NOTE(review): `debug` is assigned here without a visible import in this
# chunk — presumably provided by one of the star-level MaKaC imports above;
# confirm before running.
debug.debugW= None
db = Zdb()
db.open()
ah = AvatarHolder()
# Re-index every user in all of the avatar holder's indexes.
for a in conference.sortById(ah.getList()):
    print "%4d - %s %s"%(int(a.getId()),a.getName(),a.getSurName())
    for i in ah._indexes:
        indexes.IndexesHolder().getById(i).indexUser(a)
cm = CategoryManager()
# Rebuild the category indexes, then re-index every conference they contain.
for cat in conference.sortById(cm.getList()):
    print "%4d - %s"%(int(cat.getId()),cat.getName())
    #catIdx = indexes.IndexesHolder().getIndex('category')
    #catIdx.reindexCateg(cat)
    cat.unindexCateg()
    cat.indexCateg()
    for c in cat.getConferenceList():
        # print "%12d - %s"%(int(c.getId()),c.getTitle())
        # Repair conferences whose back-reference attribute is missing by
        # pointing it at the conference object itself.
        try: x = c.conference
        except:
            print "shit..."
            c.conference = c
            x = c.conference
        print "%12d -> %3d %s"%(int(c.getId()),int(x.getId()),x.getTitle())
        c.indexConf()
        cat.indexConf(c)
#time.sleep(5)
db.commit()
| belokop-an/agenda-tools | tools/recovery/fixIndexes.py | Python | gpl-2.0 | 1,447 |
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
import test.bootstrap
import ifcopenshell.api
class TestRemoveUnit(test.bootstrap.IFC4):
    """Tests for the unit.remove_unit API call."""

    def test_remove_a_single_unit(self):
        """An unassigned unit is simply deleted from the model."""
        target = self.file.createIfcContextDependentUnit()
        ifcopenshell.api.run("unit.remove_unit", self.file, unit=target)
        assert len(self.file.by_type("IfcContextDependentUnit")) == 0

    def test_remove_the_only_assigned_unit(self):
        """Removing the last assigned unit also removes the empty assignment."""
        self.file.createIfcProject()
        target = self.file.createIfcContextDependentUnit()
        ifcopenshell.api.run("unit.assign_unit", self.file, units=[target])
        ifcopenshell.api.run("unit.remove_unit", self.file, unit=target)
        assert len(self.file.by_type("IfcContextDependentUnit")) == 0
        assert len(self.file.by_type("IfcUnitAssignment")) == 0

    def test_remove_an_assigned_unit(self):
        """Removing one of several assigned units keeps the rest assigned."""
        self.file.createIfcProject()
        removed = self.file.createIfcContextDependentUnit()
        kept = self.file.createIfcSIUnit()
        assignment = ifcopenshell.api.run(
            "unit.assign_unit", self.file, units=[removed, kept])
        ifcopenshell.api.run("unit.remove_unit", self.file, unit=removed)
        assert len(self.file.by_type("IfcContextDependentUnit")) == 0
        assert assignment.Units == (kept,)

    def test_removing_a_unit_deeply(self):
        """Removing a conversion-based unit removes its whole subgraph."""
        target = ifcopenshell.api.run(
            "unit.add_conversion_based_unit", self.file, name="foot")
        ifcopenshell.api.run("unit.remove_unit", self.file, unit=target)
        assert len([e for e in self.file]) == 0
| IfcOpenShell/IfcOpenShell | src/ifcopenshell-python/test/api/unit/test_remove_unit.py | Python | lgpl-3.0 | 2,276 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import pbm.plugins
import pbm.utils
class ChromeFolderPlugin(pbm.plugins.PromiumPlugin):
    """Maintains a bookmark-bar folder of chrome:// shortcut URLs."""

    folder_name = 'chrome'
    urls = [
        "chrome://bookmarks",
        "chrome://history",
        "chrome://extensions",
        "chrome://plugins",
        "chrome://settings",
        "chrome://flags",
        "chrome://apps",
        "chrome://downloads",
        "chrome://chrome",
        "chrome://chrome-urls",
    ]

    def preprocess_bookmarks(self, bookmarks_obj):
        """Drop any existing 'chrome' folder before it gets rebuilt."""
        return bookmarks_obj.remove_bookmark_bar_folders(self.folder_name)

    def postprocess_bookmarks(self, bookmarks_obj):
        # always overwrite the 'chrome' folder
        return bookmarks_obj.add_bookmark_bar_folder(
            folder_name=self.folder_name,
            folder_nodes=self.build_chrome_nodes(bookmarks_obj.ids))

    def build_chrome_nodes(self, ids):
        """Build one 'url' bookmark node per chrome:// URL, in list order."""
        timestamp = pbm.utils.get_datetime_now_longdate()
        nodes = []
        for url in self.urls:
            nodes.append({
                "type": 'url',
                "url": url,
                "id": ids.next(),
                "name": url,
                "date_added": timestamp,
                "date_modified": timestamp,
            })
        return nodes
| westurner/pbm | pbm/plugins/chromefolder.py | Python | bsd-3-clause | 1,239 |
# coding=utf-8
"""
Collect metrics from Puppet DB Dashboard
#### Dependencies
* urllib2
* json
"""
import urllib2
import diamond.collector
from diamond.convertor import time as time_convertor
try:
import json
except ImportError:
import simplejson as json
class PuppetDBCollector(diamond.collector.Collector):
    """Collects PuppetDB dashboard metrics over the PuppetDB HTTP metrics API."""

    # Mapping of a short metric-group name to the v2 metrics mbean path
    # queried on the PuppetDB host. The exact path strings are the API
    # contract with PuppetDB — do not reformat their contents.
    PATHS = {
        'memory': "v2/metrics/mbean/java.lang:type=Memory",
        'queue': "v2/metrics/mbean/org.apache.activemq:BrokerName=localhost"
        + ",Type=Queue,Destination=com.puppetlabs.puppetdb.commands",
        'processing-time': "v2/metrics/mbean/com.puppetlabs.puppetdb.command:"
        + "type=global,name=processing-time",
        'processed': "v2/metrics/mbean/com.puppetlabs.puppetdb.command:"
        + "type=global,name=processed",
        'retried': "v2/metrics/mbean/com.puppetlabs.puppetdb.command:"
        + "type=global,name=retried",
        'discarded': "v2/metrics/mbean/com.puppetlabs.puppetdb.command:"
        + "type=global,name=discarded",
        'fatal': "v2/metrics/mbean/com.puppetlabs.puppetdb.command:"
        + "type=global,name=fatal",
        'commands.service-time': "v2/metrics/mbean/com.puppetlabs.puppetdb."
        + "http.server:type=/v3/commands,name=service-time",
        'resources.service-time': "v2/metrics/mbean/com.puppetlabs.puppetdb."
        + "http.server:type=/v3/resources,name=service-time",
        'gc-time': "v2/metrics/mbean/com.puppetlabs.puppetdb.scf.storage:"
        + "type=default,name=gc-time",
        'duplicate-pct': "v2/metrics/mbean/com.puppetlabs.puppetdb.scf.storage:"
        + "type=default,name=duplicate-pct",
        'pct-resource-dupes': "v2/metrics/mbean/com.puppetlabs.puppetdb.query."
        + "population:type=default,name=pct-resource-dupes",
        'num-nodes': "v2/metrics/mbean/com.puppetlabs.puppetdb.query."
        + "population:type=default,name=num-nodes",
        'num-resources': "v2/metrics/mbean/com.puppetlabs.puppetdb.query."
        + "population:type=default,name=num-resources",
    }

    def get_default_config_help(self):
        """Returns the help text for the collector's configuration options."""
        config_help = super(PuppetDBCollector,
                            self).get_default_config_help()
        config_help.update({
            'host': 'Hostname to collect from',
            'port': 'Port number to collect from',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PuppetDBCollector, self).get_default_config()
        config.update({
            'host': 'localhost',
            'port': 8080,
            'path': 'PuppetDB',
        })
        return config

    def fetch_metrics(self, url):
        """Fetch and JSON-decode one mbean document from PuppetDB.

        On any connection error an empty dict is returned (and the error
        logged); collect() will then raise a KeyError on the missing keys.
        """
        try:
            url = "http://%s:%s/%s" % (
                self.config['host'], int(self.config['port']), url)
            response = urllib2.urlopen(url)
        except Exception, e:
            self.log.error('Couldn\'t connect to puppetdb: %s -> %s', url, e)
            return {}
        return json.load(response)

    def collect(self):
        """Fetch every mbean document and publish the dashboard metrics."""
        rawmetrics = {}
        for subnode in self.PATHS:
            path = self.PATHS[subnode]
            rawmetrics[subnode] = self.fetch_metrics(path)
        # Population / duplication gauges.
        self.publish_gauge('num_resources',
                           rawmetrics['num-resources']['Value'])
        self.publish_gauge('catalog_duplicate_pct',
                           rawmetrics['duplicate-pct']['Value'])
        # Latency values are converted from PuppetDB's reported LatencyUnit
        # to seconds; the 50th percentile (median) is published.
        self.publish_gauge(
            'sec_command',
            time_convertor.convert(
                rawmetrics['processing-time']['50thPercentile'],
                rawmetrics['processing-time']['LatencyUnit'],
                'seconds'))
        self.publish_gauge(
            'resources_service_time',
            time_convertor.convert(
                rawmetrics['resources.service-time']['50thPercentile'],
                rawmetrics['resources.service-time']['LatencyUnit'],
                'seconds'))
        self.publish_gauge(
            'enqueueing_service_time',
            time_convertor.convert(
                rawmetrics['commands.service-time']['50thPercentile'],
                rawmetrics['commands.service-time']['LatencyUnit'],
                'seconds'))
        self.publish_gauge('discarded', rawmetrics['discarded']['Count'])
        self.publish_gauge('processed', rawmetrics['processed']['Count'])
        self.publish_gauge('rejected', rawmetrics['fatal']['Count'])
        self.publish_gauge(
            'DB_Compaction',
            time_convertor.convert(
                rawmetrics['gc-time']['50thPercentile'],
                rawmetrics['gc-time']['LatencyUnit'],
                'seconds'))
        self.publish_gauge('resource_duplicate_pct',
                           rawmetrics['pct-resource-dupes']['Value'])
        self.publish_gauge('num_nodes',
                           rawmetrics['num-nodes']['Value'])
        # ActiveMQ command-queue counters and gauges.
        self.publish_counter('queue.ProducerCount',
                             rawmetrics['queue']['ProducerCount'])
        self.publish_counter('queue.DequeueCount',
                             rawmetrics['queue']['DequeueCount'])
        self.publish_counter('queue.ConsumerCount',
                             rawmetrics['queue']['ConsumerCount'])
        self.publish_gauge('queue.QueueSize',
                           rawmetrics['queue']['QueueSize'])
        self.publish_counter('queue.ExpiredCount',
                             rawmetrics['queue']['ExpiredCount'])
        self.publish_counter('queue.EnqueueCount',
                             rawmetrics['queue']['EnqueueCount'])
        self.publish_counter('queue.InFlightCount',
                             rawmetrics['queue']['InFlightCount'])
        self.publish_gauge('queue.CursorPercentUsage',
                           rawmetrics['queue']['CursorPercentUsage'])
        self.publish_gauge('queue.MemoryUsagePortion',
                           rawmetrics['queue']['MemoryUsagePortion'])
        # JVM memory usage.
        self.publish_gauge('memory.NonHeapMemoryUsage.used',
                           rawmetrics['memory']['NonHeapMemoryUsage']['used'])
        self.publish_gauge(
            'memory.NonHeapMemoryUsage.committed',
            rawmetrics['memory']['NonHeapMemoryUsage']['committed'])
        self.publish_gauge('memory.HeapMemoryUsage.used',
                           rawmetrics['memory']['HeapMemoryUsage']['used'])
        self.publish_gauge('memory.HeapMemoryUsage.committed',
                           rawmetrics['memory']['HeapMemoryUsage']['committed'])
| tsheasha/fullerite | src/diamond/collectors/puppetdb/puppetdb.py | Python | apache-2.0 | 6,535 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utility functions
"""
from __future__ import print_function
import numpy as np
from nupic.encoders.base import defaultDtype
from nupic.encoders.coordinate import CoordinateEncoder
def createLocationEncoder(t, w=15):
    """
    A default coordinate encoder for encoding locations into sparse
    distributed representations.
    """
    # The encoder width matches the thalamus model's L6 cell count.
    return CoordinateEncoder(name="positionEncoder", n=t.l6CellCount, w=w)
def encodeLocation(encoder, x, y, output, radius=5):
    """Encode an (x, y) location into *output* and return the active bit indices."""
    # Radius of 7 or 8 gives an overlap of about 8 or 9 with neighboring pixels;
    # radius of 5 gives about 3.
    scaledCoords = np.array([x * radius, y * radius])
    encoder.encodeIntoArray((scaledCoords, radius), output)
    return output.nonzero()[0]
def trainThalamusLocations(t, encoder):
    """Teach each TRN cell to respond to the location SDR of its own (x, y)."""
    print("Training TRN cells on location SDRs")
    scratch = np.zeros(encoder.getWidth(), dtype=defaultDtype)
    # Iterate row-major over the TRN sheet, exactly as the original did.
    for row in range(0, t.trnHeight):
        for col in range(0, t.trnWidth):
            sdr = encodeLocation(encoder, col, row, scratch)
            t.learnL6Pattern(sdr, [(col, row)])
def getUnionLocations(encoder, x, y, r, step=1):
    """
    Return a union of location encodings that correspond to the union of all
    locations within the specified circle.
    """
    scratch = np.zeros(encoder.getWidth(), dtype=defaultDtype)
    active = set()
    for dx in range(-r, r + 1, step):
        for dy in range(-r, r + 1, step):
            # Only offsets inside (or on) the circle of radius r contribute.
            if dx * dx + dy * dy <= r * r:
                active.update(encodeLocation(encoder, x + dx, y + dy, scratch))
    return active
def trainThalamusLocationsTMP(t, encoder, windowSize=5):
    """Train TRN cells so each location SDR also activates nearby cells
    within a square window of half-width *windowSize*."""
    print("Training TRN cells on location SDRs")
    scratch = np.zeros(encoder.getWidth(), dtype=defaultDtype)
    for wy in range(0, t.trnHeight):
        print(wy)
        for wx in range(0, t.trnWidth):
            sdr = encodeLocation(encoder, wx, wy, scratch)
            for x in range(wx - windowSize, wx + windowSize):
                for y in range(wy - windowSize, wy + windowSize):
                    # Clip the window to the TRN sheet boundaries.
                    inBounds = (0 <= x < t.trnWidth) and (0 <= y < t.trnHeight)
                    if inBounds:
                        t.learnL6Pattern(sdr, [(x, y)])
| subutai/htmresearch | htmresearch/frameworks/thalamus/thalamus_utils.py | Python | agpl-3.0 | 3,123 |
import argparse
import os
import re
from pathlib import Path
from typing import Optional
import py.path
import pytest
from _pytest.config import ExitCode
from _pytest.config import UsageError
from _pytest.main import resolve_collection_argument
from _pytest.main import validate_basetemp
from _pytest.pytester import Testdir
@pytest.mark.parametrize(
    "ret_exc",
    (
        pytest.param((None, ValueError)),
        pytest.param((42, SystemExit)),
        pytest.param((False, SystemExit)),
    ),
)
def test_wrap_session_notify_exception(ret_exc, testdir):
    """pytest_internalerror may turn an internal error into a clean exit.

    Each parameter is a (returncode, exception-type) pair: the generated
    conftest raises the exception in pytest_sessionstart, and
    pytest_internalerror then calls pytest.exit() with the given returncode
    (or does nothing when the returncode is False).
    """
    returncode, exc = ret_exc
    c1 = testdir.makeconftest(
        """
        import pytest

        def pytest_sessionstart():
            raise {exc}("boom")

        def pytest_internalerror(excrepr, excinfo):
            returncode = {returncode!r}
            if returncode is not False:
                pytest.exit("exiting after %s..." % excinfo.typename, returncode={returncode!r})
        """.format(
            returncode=returncode, exc=exc.__name__
        )
    )
    result = testdir.runpytest()
    if returncode:
        assert result.ret == returncode
    else:
        assert result.ret == ExitCode.INTERNAL_ERROR
    assert result.stdout.lines[0] == "INTERNALERROR> Traceback (most recent call last):"
    # The "line 4" below depends on the exact layout of the conftest source
    # written above (blank line included).
    if exc == SystemExit:
        assert result.stdout.lines[-3:] == [
            f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
            'INTERNALERROR> raise SystemExit("boom")',
            "INTERNALERROR> SystemExit: boom",
        ]
    else:
        assert result.stdout.lines[-3:] == [
            f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
            'INTERNALERROR> raise ValueError("boom")',
            "INTERNALERROR> ValueError: boom",
        ]
    if returncode is False:
        assert result.stderr.lines == ["mainloop: caught unexpected SystemExit!"]
    else:
        assert result.stderr.lines == [f"Exit: exiting after {exc.__name__}..."]
@pytest.mark.parametrize("returncode", (None, 42))
def test_wrap_session_exit_sessionfinish(
    returncode: Optional[int], testdir: Testdir
) -> None:
    """pytest.exit() from pytest_sessionfinish controls the final exit code
    (falling back to NO_TESTS_COLLECTED when returncode is None)."""
    testdir.makeconftest(
        """
        import pytest

        def pytest_sessionfinish():
            pytest.exit(msg="exit_pytest_sessionfinish", returncode={returncode})
        """.format(
            returncode=returncode
        )
    )
    result = testdir.runpytest()
    if returncode:
        assert result.ret == returncode
    else:
        assert result.ret == ExitCode.NO_TESTS_COLLECTED
    assert result.stdout.lines[-1] == "collected 0 items"
    assert result.stderr.lines == ["Exit: exit_pytest_sessionfinish"]
@pytest.mark.parametrize("basetemp", ["foo", "foo/bar"])
def test_validate_basetemp_ok(tmp_path, basetemp, monkeypatch):
    """A basetemp strictly below the current working directory is accepted."""
    monkeypatch.chdir(str(tmp_path))
    validate_basetemp(tmp_path / basetemp)
@pytest.mark.parametrize("basetemp", ["", ".", ".."])
def test_validate_basetemp_fails(tmp_path, basetemp, monkeypatch):
    """An empty basetemp, the cwd, or a parent of the cwd is rejected,
    since pytest would otherwise wipe it."""
    monkeypatch.chdir(str(tmp_path))
    msg = "basetemp must not be empty, the current working directory or any parent directory of it"
    with pytest.raises(argparse.ArgumentTypeError, match=msg):
        # The empty string is passed through as-is to exercise that branch.
        if basetemp:
            basetemp = tmp_path / basetemp
        validate_basetemp(basetemp)
def test_validate_basetemp_integration(testdir):
    """End-to-end: `--basetemp=.` is rejected on the command line."""
    result = testdir.runpytest("--basetemp=.")
    result.stderr.fnmatch_lines("*basetemp must not be*")
class TestResolveCollectionArgument:
    """Tests for resolve_collection_argument(), which splits a command-line
    collection argument into a filesystem path plus ``::``-separated
    selection parts."""

    @pytest.fixture
    def invocation_dir(self, testdir: Testdir) -> py.path.local:
        """A tmpdir laid out as src/pkg/{__init__.py,test.py}, with src/ on
        sys.path and the cwd set to the tmpdir."""
        testdir.syspathinsert(str(testdir.tmpdir / "src"))
        testdir.chdir()
        pkg = testdir.tmpdir.join("src/pkg").ensure_dir()
        pkg.join("__init__.py").ensure()
        pkg.join("test.py").ensure()
        return testdir.tmpdir

    @pytest.fixture
    def invocation_path(self, invocation_dir: py.path.local) -> Path:
        """The invocation directory as a pathlib.Path."""
        return Path(str(invocation_dir))

    def test_file(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
        """File and parts."""
        assert resolve_collection_argument(invocation_path, "src/pkg/test.py") == (
            invocation_dir / "src/pkg/test.py",
            [],
        )
        # A trailing "::" yields a single empty selection part.
        assert resolve_collection_argument(invocation_path, "src/pkg/test.py::") == (
            invocation_dir / "src/pkg/test.py",
            [""],
        )
        assert resolve_collection_argument(
            invocation_path, "src/pkg/test.py::foo::bar"
        ) == (invocation_dir / "src/pkg/test.py", ["foo", "bar"])
        assert resolve_collection_argument(
            invocation_path, "src/pkg/test.py::foo::bar::"
        ) == (invocation_dir / "src/pkg/test.py", ["foo", "bar", ""])

    def test_dir(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
        """Directory and parts."""
        assert resolve_collection_argument(invocation_path, "src/pkg") == (
            invocation_dir / "src/pkg",
            [],
        )
        # Selection parts are only valid after a file, never after a directory.
        with pytest.raises(
            UsageError, match=r"directory argument cannot contain :: selection parts"
        ):
            resolve_collection_argument(invocation_path, "src/pkg::")
        with pytest.raises(
            UsageError, match=r"directory argument cannot contain :: selection parts"
        ):
            resolve_collection_argument(invocation_path, "src/pkg::foo::bar")

    def test_pypath(self, invocation_dir: py.path.local, invocation_path: Path) -> None:
        """Dotted name and parts."""
        assert resolve_collection_argument(
            invocation_path, "pkg.test", as_pypath=True
        ) == (invocation_dir / "src/pkg/test.py", [])
        assert resolve_collection_argument(
            invocation_path, "pkg.test::foo::bar", as_pypath=True
        ) == (invocation_dir / "src/pkg/test.py", ["foo", "bar"])
        assert resolve_collection_argument(invocation_path, "pkg", as_pypath=True) == (
            invocation_dir / "src/pkg",
            [],
        )
        with pytest.raises(
            UsageError, match=r"package argument cannot contain :: selection parts"
        ):
            resolve_collection_argument(
                invocation_path, "pkg::foo::bar", as_pypath=True
            )

    def test_does_not_exist(self, invocation_path: Path) -> None:
        """Given a file/module that does not exist raises UsageError."""
        with pytest.raises(
            UsageError, match=re.escape("file or directory not found: foobar")
        ):
            resolve_collection_argument(invocation_path, "foobar")
        with pytest.raises(
            UsageError,
            match=re.escape(
                "module or package not found: foobar (missing __init__.py?)"
            ),
        ):
            resolve_collection_argument(invocation_path, "foobar", as_pypath=True)

    def test_absolute_paths_are_resolved_correctly(
        self, invocation_dir: py.path.local, invocation_path: Path
    ) -> None:
        """Absolute paths resolve back to absolute paths."""
        full_path = str(invocation_dir / "src")
        assert resolve_collection_argument(invocation_path, full_path) == (
            py.path.local(os.path.abspath("src")),
            [],
        )
        # ensure full paths given in the command-line without the drive letter resolve
        # to the full path correctly (#7628)
        drive, full_path_without_drive = os.path.splitdrive(full_path)
        assert resolve_collection_argument(
            invocation_path, full_path_without_drive
        ) == (py.path.local(os.path.abspath("src")), [])
def test_module_full_path_without_drive(testdir):
    """Collect and run test using full path except for the drive letter (#7628).

    Passing a full path without a drive letter would trigger a bug in py.path.local
    where it would keep the full path without the drive letter around, instead of resolving
    to the full path, resulting in fixtures node ids not matching against test node ids correctly.
    """
    testdir.makepyfile(
        **{
            "project/conftest.py": """
                import pytest

                @pytest.fixture
                def fix(): return 1
            """,
        }
    )
    testdir.makepyfile(
        **{
            "project/tests/dummy_test.py": """
                def test(fix):
                    assert fix == 1
            """
        }
    )
    fn = testdir.tmpdir.join("project/tests/dummy_test.py")
    assert fn.isfile()
    # Strip the drive letter to reproduce the bug scenario; on POSIX,
    # splitdrive() leaves the path unchanged.
    drive, path = os.path.splitdrive(str(fn))
    result = testdir.runpytest(path, "-v")
    result.stdout.fnmatch_lines(
        [
            os.path.join("project", "tests", "dummy_test.py") + "::test PASSED *",
            "* 1 passed in *",
        ]
    )
| pexip/os-pytest | testing/test_main.py | Python | mit | 8,774 |
"""
Profiling hooks
This module contains a couple of decorators (`profile` and `coverage`) that
can be used to wrap functions and/or methods to produce profiles and line
coverage reports. There's a third convenient decorator (`timecall`) that
measures the duration of function execution without the extra profiling
overhead.
Usage example (Python 2.4 or newer)::
from profilehooks import profile, coverage
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print(fn(42))
Usage example (Python 2.3 or older)::
from profilehooks import profile, coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
# Now wrap that function in a decorator
fn = profile(fn) # or coverage(fn)
print fn(42)
Reports for all thusly decorated functions will be printed to sys.stdout
on program termination. You can alternatively request for immediate
reports for each call by passing immediate=True to the profile decorator.
There's also a @timecall decorator for printing the time to sys.stderr
every time a function is called, when you just want to get a rough measure
instead of a detailed (but costly) profile.
Caveats
A thread on python-dev convinced me that hotshot produces bogus numbers.
See http://mail.python.org/pipermail/python-dev/2005-November/058264.html
I don't know what will happen if a decorated function will try to call
another decorated function. All decorators probably need to explicitly
support nested profiling (currently TraceFuncCoverage is the only one
that supports this, while HotShotFuncProfile has support for recursive
functions.)
Profiling with hotshot creates temporary files (*.prof for profiling,
*.cprof for coverage) in the current directory. These files are not
cleaned up. Exception: when you specify a filename to the profile
decorator (to store the pstats.Stats object for later inspection),
the temporary file will be the filename you specified with '.raw'
appended at the end.
Coverage analysis with hotshot seems to miss some executions resulting
in lower line counts and some lines erroneously marked as never
executed. For this reason coverage analysis now uses trace.py which is
slower, but more accurate.
Copyright (c) 2004--2012 Marius Gedminas <marius@pov.lt>
Copyright (c) 2007 Hanno Schlichting
Copyright (c) 2008 Florian Schulze
Released under the MIT licence since December 2006:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
(Previously it was distributed under the GNU General Public Licence.)
"""
from __future__ import print_function
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright 2004-2012 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.6"
__date__ = "2012-05-05"
import atexit
import inspect
import sys
import re
# For profiling
from profile import Profile
import pstats
# For hotshot profiling (inaccurate!)
try:
import hotshot
import hotshot.stats
except ImportError:
hotshot = None
# For trace.py coverage
import trace
# For hotshot coverage (inaccurate!; uses undocumented APIs; might break)
if hotshot is not None:
import _hotshot
import hotshot.log
# For cProfile profiling (best)
try:
import cProfile
except ImportError:
cProfile = None
# For timecall
import time
# registry of available profilers
AVAILABLE_PROFILERS = {}
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
            sort=None, entries=40,
            profiler=('cProfile', 'profile', 'hotshot')):
    """Mark `fn` for profiling.

    If `skip` is > 0, first `skip` calls to `fn` will not be profiled.

    If `immediate` is False, profiling results will be printed to
    sys.stdout on program termination.  Otherwise results will be printed
    after each call.

    If `dirs` is False only the name of the file will be printed.
    Otherwise the full path is used.

    `sort` can be a list of sort keys (defaulting to ['cumulative',
    'time', 'calls']).  The following ones are recognized::

        'calls'      -- call count
        'cumulative' -- cumulative time
        'file'       -- file name
        'line'       -- line number
        'module'     -- file name
        'name'       -- function name
        'nfl'        -- name/file/line
        'pcalls'     -- call count
        'stdname'    -- standard name
        'time'       -- internal time

    `entries` limits the output to the first N entries.

    `profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference.  The default is
    ('cProfile', 'profile', 'hotshot').

    If `filename` is specified, the profile stats will be stored in the
    named file.  You can load them with pstats.Stats(filename).

    Usage::

        def fn(...):
            ...
        fn = profile(fn, skip=1)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @profile(skip=3)
        def fn(...):
            ...

    or just ::

        @profile
        def fn(...):
            ...
    """
    if fn is None:
        # Called as @profile(...): hand back a decorator that re-enters
        # this function with the captured options.
        def decorator(fn):
            return profile(fn, skip=skip, filename=filename,
                           immediate=immediate, dirs=dirs,
                           sort=sort, entries=entries,
                           profiler=profiler)
        return decorator
    # Called as plain @profile: we are the decorator itself.
    preferences = [profiler] if isinstance(profiler, str) else profiler
    profiler_class = None
    for name in preferences:
        profiler_class = AVAILABLE_PROFILERS.get(name)
        if profiler_class is not None:
            break
    if profiler_class is None:
        raise ValueError('only these profilers are available: %s'
                         % ', '.join(AVAILABLE_PROFILERS))
    fp = profiler_class(fn, skip=skip, filename=filename,
                        immediate=immediate, dirs=dirs,
                        sort=sort, entries=entries)
    # We cannot return fp or fp.__call__ directly as that would break
    # method definitions; return a plain function instead.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    for attr in ('__doc__', '__name__', '__dict__', '__module__'):
        setattr(new_fn, attr, getattr(fn, attr))
    return new_fn
def coverage(fn):
    """Mark `fn` for line coverage analysis.

    Results will be printed to sys.stdout on program termination.

    Usage::

        def fn(...):
            ...
        fn = coverage(fn)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @coverage
        def fn(...):
            ...
    """
    tracker = TraceFuncCoverage(fn)  # or HotShotFuncCoverage
    # Returning the tracker (or its __call__) directly would break method
    # definitions, so wrap it in a plain function.
    def new_fn(*args, **kw):
        return tracker(*args, **kw)
    for attr in ('__doc__', '__name__', '__dict__', '__module__'):
        setattr(new_fn, attr, getattr(fn, attr))
    return new_fn
def coverage_with_hotshot(fn):
    """Mark `fn` for line coverage analysis.

    Uses the 'hotshot' module for fast coverage analysis.

    BUG: Produces inaccurate results.

    See the docstring of `coverage` for usage examples.
    """
    tracker = HotShotFuncCoverage(fn)
    # Returning the tracker (or its __call__) directly would break method
    # definitions, so wrap it in a plain function.
    def new_fn(*args, **kw):
        return tracker(*args, **kw)
    for attr in ('__doc__', '__name__', '__dict__', '__module__'):
        setattr(new_fn, attr, getattr(fn, attr))
    return new_fn
class FuncProfile(object):
    """Profiler for a function (uses profile)."""

    # This flag is shared between all instances so that nested profiled
    # calls are detected and run without starting a second profiler.
    in_profiler = False

    # Profiler implementation; subclasses may override it
    # (see CProfileFuncProfile below).
    Profile = Profile

    def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False,
                 sort=None, entries=40):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        FuncProfile registers an atexit handler that prints profiling
        information to sys.stderr when the program terminates.
        """
        self.fn = fn
        self.skip = skip
        self.filename = filename
        self.immediate = immediate
        self.dirs = dirs
        self.sort = sort or ('cumulative', 'time', 'calls')
        if isinstance(self.sort, str):
            # Allow a single sort key to be passed as a bare string.
            self.sort = (self.sort, )
        self.entries = entries
        self.reset_stats()
        atexit.register(self.atexit)

    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        if self.skip > 0:
            self.skip -= 1
            self.skipped += 1
            return self.fn(*args, **kw)
        if FuncProfile.in_profiler:
            # handle recursive calls
            return self.fn(*args, **kw)
        # You cannot reuse the same profiler for many calls and accumulate
        # stats that way.  :-/
        profiler = self.Profile()
        try:
            FuncProfile.in_profiler = True
            return profiler.runcall(self.fn, *args, **kw)
        finally:
            FuncProfile.in_profiler = False
            # Accumulate stats in the finally clause so they are recorded
            # even when the profiled function raises.
            self.stats.add(profiler)
            if self.immediate:
                self.print_stats()
                self.reset_stats()

    def print_stats(self):
        """Print profile information to sys.stdout."""
        funcname = self.fn.__name__
        filename = self.fn.__code__.co_filename
        lineno = self.fn.__code__.co_firstlineno
        print("")
        print("*** PROFILER RESULTS ***")
        print(("%s (%s:%s)" % (funcname, filename, lineno)))
        if self.skipped:
            skipped = "(%d calls not profiled)" % self.skipped
        else:
            skipped = ""
        print(("function called %d times%s" % (self.ncalls, skipped)))
        print("")
        stats = self.stats
        if self.filename:
            # Dump before strip_dirs/sort so the saved stats are complete.
            stats.dump_stats(self.filename)
        if not self.dirs:
            stats.strip_dirs()
        stats.sort_stats(*self.sort)
        stats.print_stats(self.entries)

    def reset_stats(self):
        """Reset accumulated profiler statistics."""
        # Note: not using self.Profile, since pstats.Stats() fails then
        self.stats = pstats.Stats(Profile())
        self.ncalls = 0
        self.skipped = 0

    def atexit(self):
        """Stop profiling and print profile information to sys.stdout.

        This function is registered as an atexit hook.
        """
        if not self.immediate:
            self.print_stats()


AVAILABLE_PROFILERS['profile'] = FuncProfile
# cProfile produces the same stats as profile with far less overhead, so
# register it (when available) as the preferred default profiler.
if cProfile is not None:

    class CProfileFuncProfile(FuncProfile):
        """Profiler for a function (uses cProfile)."""

        Profile = cProfile.Profile

    AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile
if hotshot is not None:

    class HotShotFuncProfile(object):
        """Profiler for a function (uses hotshot)."""

        # This flag is shared between all instances so that nested profiled
        # calls are detected and run unprofiled.
        in_profiler = False

        def __init__(self, fn, skip=0, filename=None):
            """Creates a profiler for a function.

            Every profiler has its own log file (the name of which is derived
            from the function name).

            HotShotFuncProfile registers an atexit handler that prints
            profiling information to sys.stderr when the program terminates.

            The log file is not removed and remains there to clutter the
            current working directory.
            """
            self.fn = fn
            self.filename = filename
            if self.filename:
                # Keep the raw hotshot log next to the requested stats file.
                self.logfilename = filename + ".raw"
            else:
                self.logfilename = fn.__name__ + ".prof"
            self.profiler = hotshot.Profile(self.logfilename)
            self.ncalls = 0
            self.skip = skip
            self.skipped = 0
            atexit.register(self.atexit)

        def __call__(self, *args, **kw):
            """Profile a single call to the function."""
            self.ncalls += 1
            if self.skip > 0:
                self.skip -= 1
                self.skipped += 1
                return self.fn(*args, **kw)
            if HotShotFuncProfile.in_profiler:
                # handle recursive calls
                return self.fn(*args, **kw)
            try:
                HotShotFuncProfile.in_profiler = True
                return self.profiler.runcall(self.fn, *args, **kw)
            finally:
                HotShotFuncProfile.in_profiler = False

        def atexit(self):
            """Stop profiling and print profile information to sys.stderr.

            This function is registered as an atexit hook.
            """
            self.profiler.close()
            funcname = self.fn.__name__
            filename = self.fn.__code__.co_filename
            lineno = self.fn.__code__.co_firstlineno
            print("")
            print("*** PROFILER RESULTS ***")
            print(("%s (%s:%s)" % (funcname, filename, lineno)))
            if self.skipped:
                skipped = "(%d calls not profiled)" % self.skipped
            else:
                skipped = ""
            print(("function called %d times%s" % (self.ncalls, skipped)))
            print("")
            stats = hotshot.stats.load(self.logfilename)
            # hotshot.stats.load takes ages, and the .prof file eats megabytes, but
            # a saved stats object is small and fast
            if self.filename:
                stats.dump_stats(self.filename)
            # it is best to save before strip_dirs
            stats.strip_dirs()
            stats.sort_stats('cumulative', 'time', 'calls')
            stats.print_stats(40)

    AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile
class HotShotFuncCoverage:
    """Coverage analysis for a function (uses _hotshot).

    HotShot coverage is reportedly faster than trace.py, but it appears to
    have problems with exceptions; also line counts in coverage reports
    are generally lower from line counts produced by TraceFuncCoverage.
    Is this my bug, or is it a problem with _hotshot?
    """

    def __init__(self, fn):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        HotShotFuncCoverage registers an atexit handler that prints
        profiling information to sys.stderr when the program terminates.

        The log file is not removed and remains there to clutter the
        current working directory.
        """
        self.fn = fn
        self.logfilename = fn.__name__ + ".cprof"
        # Uses the undocumented low-level _hotshot API directly.
        self.profiler = _hotshot.coverage(self.logfilename)
        self.ncalls = 0
        atexit.register(self.atexit)

    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        # Note: _hotshot's runcall takes the positional-args tuple and the
        # keyword dict as two plain arguments, unlike profile/cProfile.
        return self.profiler.runcall(self.fn, args, kw)

    def atexit(self):
        """Stop profiling and print profile information to sys.stderr.

        This function is registered as an atexit hook.
        """
        self.profiler.close()
        funcname = self.fn.__name__
        filename = self.fn.__code__.co_filename
        lineno = self.fn.__code__.co_firstlineno
        print("")
        print("*** COVERAGE RESULTS ***")
        print(("%s (%s:%s)" % (funcname, filename, lineno)))
        print(("function called %d times" % self.ncalls))
        print("")
        fs = FuncSource(self.fn)
        reader = hotshot.log.LogReader(self.logfilename)
        for what, (filename, lineno, funcname), tdelta in reader:
            if filename != fs.filename:
                continue
            if what == hotshot.log.LINE:
                fs.mark(lineno)
            if what == hotshot.log.ENTER:
                # hotshot gives us the line number of the function definition
                # and never gives us a LINE event for the first statement in
                # a function, so if we didn't perform this mapping, the first
                # statement would be marked as never executed
                if lineno == fs.firstlineno:
                    lineno = fs.firstcodelineno
                fs.mark(lineno)
        reader.close()
        print(fs)
class TraceFuncCoverage:
    """Coverage analysis for a function (uses trace module).

    HotShot coverage analysis is reportedly faster, but it appears to have
    problems with exceptions.
    """

    # Shared between all instances so that nested calls work
    tracer = trace.Trace(count=True, trace=False,
                         ignoredirs=[sys.prefix, sys.exec_prefix])

    # This flag is also shared between all instances
    tracing = False

    def __init__(self, fn):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        TraceFuncCoverage registers an atexit handler that prints
        profiling information to sys.stderr when the program terminates.

        The log file is not removed and remains there to clutter the
        current working directory.
        """
        self.fn = fn
        self.logfilename = fn.__name__ + ".cprof"
        self.ncalls = 0
        atexit.register(self.atexit)

    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        if TraceFuncCoverage.tracing:
            # Nested/recursive call while the shared tracer is active:
            # run it untraced to avoid restarting the tracer.
            return self.fn(*args, **kw)
        try:
            TraceFuncCoverage.tracing = True
            return self.tracer.runfunc(self.fn, *args, **kw)
        finally:
            TraceFuncCoverage.tracing = False

    def atexit(self):
        """Stop profiling and print profile information to sys.stderr.

        This function is registered as an atexit hook.
        """
        funcname = self.fn.__name__
        filename = self.fn.__code__.co_filename
        lineno = self.fn.__code__.co_firstlineno
        print("")
        print("*** COVERAGE RESULTS ***")
        print(("%s (%s:%s)" % (funcname, filename, lineno)))
        print(("function called %d times" % self.ncalls))
        print("")
        fs = FuncSource(self.fn)
        for (filename, lineno), count in self.tracer.counts.items():
            if filename != fs.filename:
                continue
            fs.mark(lineno, count)
        print(fs)
        never_executed = fs.count_never_executed()
        if never_executed:
            print(("%d lines were not executed." % never_executed))
class FuncSource:
    """Source code annotator for a function."""

    # Lines matching this pattern (a bare "finally:" clause) never get a
    # line event from the tracer, so they must not count as "not executed".
    blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$")

    def __init__(self, fn):
        self.fn = fn
        self.filename = inspect.getsourcefile(fn)
        self.source, self.firstlineno = inspect.getsourcelines(fn)
        # Maps line number -> execution count (0 == executable, never run).
        self.sourcelines = {}
        self.firstcodelineno = self.firstlineno
        self.find_source_lines()

    def find_source_lines(self):
        """Mark all executable source lines in fn as executed 0 times."""
        strs = trace.find_strings(self.filename)
        lines = trace.find_lines_from_code(self.fn.__code__, strs)
        self.firstcodelineno = sys.maxsize
        for lineno in lines:
            self.firstcodelineno = min(self.firstcodelineno, lineno)
            self.sourcelines.setdefault(lineno, 0)
        if self.firstcodelineno == sys.maxsize:
            # No executable lines found; fall back to the def line.
            self.firstcodelineno = self.firstlineno

    def mark(self, lineno, count=1):
        """Mark a given source line as executed count times.

        Multiple calls to mark for the same lineno add up.
        """
        self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count

    def count_never_executed(self):
        """Count statements that were never executed."""
        lineno = self.firstlineno
        counter = 0
        for line in self.source:
            if self.sourcelines.get(lineno) == 0:
                if not self.blank_rx.match(line):
                    counter += 1
            lineno += 1
        return counter

    def __str__(self):
        """Return annotated source code for the function."""
        lines = []
        lineno = self.firstlineno
        for line in self.source:
            counter = self.sourcelines.get(lineno)
            if counter is None:
                # Not an executable line (comment, blank, docstring, ...).
                prefix = ' ' * 7
            elif counter == 0:
                if self.blank_rx.match(line):
                    prefix = ' ' * 7
                else:
                    # Executable but never run.
                    prefix = '>' * 6 + ' '
            else:
                prefix = '%5d: ' % counter
            lines.append(prefix + line)
            lineno += 1
        return ''.join(lines)
def timecall(fn=None, immediate=True, timer=time.time):
    """Wrap `fn` and print its execution time.

    Example::

        @timecall
        def somefunc(x, y):
            time.sleep(x * y)
        somefunc(2, 3)

    will print the time taken by somefunc on every call.  If you want just
    a summary at program termination, use ::

        @timecall(immediate=False)

    You can also choose a timing method other than the default
    ``time.time()``, e.g. ::

        @timecall(timer=time.clock)
    """
    if fn is None:
        # Called as @timecall(...): hand back a decorator that re-enters
        # this function with the captured options.
        def decorator(fn):
            return timecall(fn, immediate=immediate, timer=timer)
        return decorator
    # Called as plain @timecall: we are the decorator itself.
    ft = FuncTimer(fn, immediate=immediate, timer=timer)
    # We cannot return ft or ft.__call__ directly as that would break
    # method definitions; return a plain function instead.
    def new_fn(*args, **kw):
        return ft(*args, **kw)
    for attr in ('__doc__', '__name__', '__dict__', '__module__'):
        setattr(new_fn, attr, getattr(fn, attr))
    return new_fn
class FuncTimer(object):
    """Measures and reports the wall-clock duration of calls to a function.

    With ``immediate`` set, each call's duration is written to sys.stderr
    right away; otherwise a summary is printed at program exit.
    """

    def __init__(self, fn, immediate, timer):
        self.fn = fn
        self.ncalls = 0
        self.totaltime = 0
        self.immediate = immediate
        self.timer = timer
        if not immediate:
            # Defer reporting to a single summary when the program exits.
            atexit.register(self.atexit)

    def __call__(self, *args, **kw):
        """Time a single call to the function."""
        func = self.fn
        clock = self.timer
        self.ncalls += 1
        try:
            started = clock()
            return func(*args, **kw)
        finally:
            # Record the elapsed time even when the call raises.
            elapsed = clock() - started
            self.totaltime += elapsed
            if self.immediate:
                code = func.__code__
                sys.stderr.write("\n %s (%s:%s):\n %.3f seconds\n\n" % (
                    func.__name__, code.co_filename, code.co_firstlineno,
                    elapsed))
                sys.stderr.flush()

    def atexit(self):
        # Nothing to report if the function was never called.
        if not self.ncalls:
            return
        code = self.fn.__code__
        print(("\n %s (%s:%s):\n"
               " %d calls, %.3f seconds (%.3f seconds per call)\n" % (
                   self.fn.__name__, code.co_filename, code.co_firstlineno,
                   self.ncalls, self.totaltime,
                   self.totaltime / self.ncalls)))
| Aleks31/pychess | lib/pychess/Database/profilehooks.py | Python | gpl-3.0 | 24,872 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-05 13:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.1: changes Setting.active so that new
    # rows default to inactive (default=False).

    dependencies = [
        ('elearning', '0027_auto_20160305_1333'),
    ]

    operations = [
        migrations.AlterField(
            model_name='setting',
            name='active',
            field=models.BooleanField(default=False),
        ),
    ]
| tkupek/tkupek-elearning | tkupek_elearning/elearning/migrations/0028_auto_20160305_1338.py | Python | gpl-3.0 | 456 |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import *
from direct.distributed.ClockDelta import *
from toontown.hood import Place
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.task.Task import Task
from toontown.toonbase import TTLocalizer
import random
from direct.showbase import PythonUtil
from toontown.hood import Place
from toontown.hood import SkyUtil
from toontown.pets import PetTutorial
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs, TLNull
import HouseGlobals
class Estate(Place.Place):
    """The player's estate scene.

    Drives the local avatar's state machine while at home (walking,
    fishing, mailbox, teleporting, pet tutorial, ...), loads/unloads the
    estate geometry, and manages the underwater fog and sound effects for
    the estate pond.
    """

    notify = DirectNotifyGlobal.directNotify.newCategory('Estate')

    def __init__(self, loader, avId, zoneId, parentFSMState, doneEvent):
        Place.Place.__init__(self, None, doneEvent)
        self.id = MyEstate
        self.avId = avId
        self.zoneId = zoneId
        self.loader = loader
        # -1 means "unknown yet"; 0/1 track the above/below-water state so
        # the submerge/emerge transitions run only once per crossing.
        self.cameraSubmerged = -1
        self.toonSubmerged = -1
        self.fsm = ClassicFSM.ClassicFSM('Estate', [
            State.State('init', self.enterInit, self.exitInit,
                        ['final', 'teleportIn', 'doorIn', 'walk']),
            State.State('petTutorial', self.enterPetTutorial,
                        self.exitPetTutorial, ['walk']),
            State.State('walk', self.enterWalk, self.exitWalk,
                        ['final', 'sit', 'stickerBook', 'options', 'quest',
                         'fishing', 'mailbox', 'stopped', 'DFA', 'trialerFA',
                         'doorOut', 'push', 'pet']),
            # NOTE(review): 'stopped' appears twice in this state list (here
            # and again below without 'teleportOut') -- confirm which
            # definition ClassicFSM actually keeps.
            State.State('stopped', self.enterStopped, self.exitStopped,
                        ['walk', 'teleportOut']),
            State.State('sit', self.enterSit, self.exitSit, ['walk']),
            State.State('push', self.enterPush, self.exitPush, ['walk']),
            State.State('stickerBook', self.enterStickerBook,
                        self.exitStickerBook,
                        ['walk', 'sit', 'quest', 'fishing', 'mailbox',
                         'stopped', 'doorOut', 'push', 'pet', 'DFA',
                         'trialerFA']),
            State.State('teleportIn', self.enterTeleportIn,
                        self.exitTeleportIn, ['walk', 'petTutorial']),
            State.State('teleportOut', self.enterTeleportOut,
                        self.exitTeleportOut,
                        ['teleportIn', 'walk', 'final']),
            State.State('doorIn', self.enterDoorIn, self.exitDoorIn,
                        ['walk']),
            State.State('doorOut', self.enterDoorOut, self.exitDoorOut,
                        ['final', 'walk']),
            State.State('final', self.enterFinal, self.exitFinal,
                        ['teleportIn']),
            State.State('quest', self.enterQuest, self.exitQuest, ['walk']),
            State.State('fishing', self.enterFishing, self.exitFishing,
                        ['walk', 'stopped']),
            State.State('mailbox', self.enterMailbox, self.exitMailbox,
                        ['walk', 'stopped']),
            State.State('stopped', self.enterStopped, self.exitStopped,
                        ['walk']),
            State.State('pet', self.enterPet, self.exitPet,
                        ['walk', 'trialerFA']),
            State.State('trialerFA', self.enterTrialerFA, self.exitTrialerFA,
                        ['trialerFAReject', 'DFA']),
            State.State('trialerFAReject', self.enterTrialerFAReject,
                        self.exitTrialerFAReject, ['walk']),
            State.State('DFA', self.enterDFA, self.exitDFA,
                        ['DFAReject', 'teleportOut']),
            State.State('DFAReject', self.enterDFAReject,
                        self.exitDFAReject, ['walk'])], 'init', 'final')
        self.fsm.enterInitialState()
        self.doneEvent = doneEvent
        self.parentFSMState = parentFSMState
        return

    def delete(self):
        self.unload()

    def load(self):
        Place.Place.load(self)
        self.fog = Fog('EstateFog')
        taskMgr.add(self.__checkCameraUnderwater, 'estate-check-cam-underwater')
        path = self.loader.geom.find('**/Path')
        # Force the path to render with the ground geometry.
        path.setBin('ground', 10, 1)
        self.parentFSMState.addChild(self.fsm)

    def unload(self):
        self.ignoreAll()
        self.notify.info('remove estate-check-toon-underwater to TaskMgr in unload()')
        taskMgr.remove('estate-check-toon-underwater')
        taskMgr.remove('estate-check-cam-underwater')
        self.parentFSMState.removeChild(self.fsm)
        del self.fsm
        self.fog = None
        Place.Place.unload(self)
        return

    def enter(self, requestStatus):
        hoodId = requestStatus['hoodId']
        zoneId = requestStatus['zoneId']
        newsManager = base.cr.newsManager
        if config.GetBool('want-estate-telemetry-limiter', 1):
            limiter = TLGatherAllAvs('Estate', RotationLimitToH)
        else:
            limiter = TLNull()
        self._telemLimiter = limiter
        if newsManager:
            # Swap in the spooky sky (with a quick dim) during Halloween-type
            # decoration holidays; otherwise use the normal sky.
            holidayIds = base.cr.newsManager.getDecorationHolidayId()
            if (ToontownGlobals.HALLOWEEN_COSTUMES in holidayIds or ToontownGlobals.SPOOKY_COSTUMES in holidayIds) and self.loader.hood.spookySkyFile:
                lightsOff = Sequence(LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 0.1, Vec4(0.55, 0.55, 0.65, 1)), Func(self.loader.hood.startSpookySky))
                lightsOff.start()
            else:
                self.loader.hood.startSky()
                lightsOn = LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 0.1, Vec4(1, 1, 1, 1))
                lightsOn.start()
        else:
            self.loader.hood.startSky()
            lightsOn = LerpColorScaleInterval(base.cr.playGame.hood.loader.geom, 0.1, Vec4(1, 1, 1, 1))
            lightsOn.start()
        self.loader.hood.sky.setFogOff()
        self.__setFaintFog()
        for i in self.loader.nodeList:
            self.loader.enterAnimatedProps(i)
        self.loader.geom.reparentTo(render)
        # The client April Toons Manager is currently broken, so we have to do this hacky thing instead. :(
        #if hasattr(base.cr, 'aprilToonsMgr'):
        #if self.isEventActive(AprilToonsGlobals.EventEstateGravity):
        #base.localAvatar.startAprilToonsControls()
        if base.config.GetBool('want-april-toons'):
            base.localAvatar.startAprilToonsControls()
        self.accept('doorDoneEvent', self.handleDoorDoneEvent)
        self.accept('DistributedDoor_doorTrigger', self.handleDoorTrigger)
        self.fsm.request(requestStatus['how'], [requestStatus])

    def exit(self):
        base.localAvatar.stopChat()
        if base.config.GetBool('want-april-toons'):
            base.localAvatar.stopAprilToonsControls()
        self._telemLimiter.destroy()
        del self._telemLimiter
        if hasattr(self, 'fsm'):
            self.fsm.requestFinalState()
        self.loader.geom.reparentTo(hidden)
        for i in self.loader.nodeList:
            self.loader.exitAnimatedProps(i)
        self.loader.hood.stopSky()
        render.setFogOff()
        base.cr.cache.flush()

    def __setZoneId(self, zoneId):
        self.zoneId = zoneId

    def detectedMailboxCollision(self):
        self.fsm.request('mailbox')

    def detectedGardenPlotUse(self):
        if hasattr(self, 'fsm'):
            self.fsm.request('stopped')

    def detectedGardenPlotDone(self):
        if hasattr(self, 'fsm'):
            self.fsm.request('walk')

    def detectedFlowerSellUse(self):
        if hasattr(self, 'fsm'):
            self.fsm.request('stopped')

    def detectedFlowerSellDone(self):
        if hasattr(self, 'fsm'):
            self.fsm.request('walk')

    def doRequestLeave(self, requestStatus):
        # Route through the trialer force-acknowledge check before leaving.
        self.fsm.request('trialerFA', [requestStatus])

    def enterInit(self):
        pass

    def exitInit(self):
        pass

    def enterPetTutorial(self, bDummy = True):
        self.notify.info('remove estate-check-toon-underwater to TaskMgr in enterPetTutorial()')
        taskMgr.remove('estate-check-toon-underwater')
        self.petTutorialDoneEvent = 'PetTutorialDone'
        self.acceptOnce(self.petTutorialDoneEvent, self.petTutorialDone)
        self.petTutorial = PetTutorial.PetTutorial(self.petTutorialDoneEvent)

    def exitPetTutorial(self):
        self.notify.info('add estate-check-toon-underwater to TaskMgr in exitPetTutorial()')
        if hasattr(self, 'fsm'):
            taskMgr.add(self.__checkToonUnderwater, 'estate-check-toon-underwater')
        if hasattr(self, 'petTutorial') and self.petTutorial is not None:
            self.petTutorial.destroy()
        return

    def petTutorialDone(self):
        self.ignore(self.petTutorialDoneEvent)
        self.petTutorial.destroy()
        self.petTutorial = None
        self.fsm.request('walk', [1])
        return

    def enterMailbox(self):
        # The mailbox reuses the purchase-UI behavior from Place.
        Place.Place.enterPurchase(self)
        base.localAvatar.startSleepWatch(self.__handleFallingAsleepMailbox)
        self.enablePeriodTimer()

    def __handleFallingAsleepMailbox(self, arg):
        if hasattr(self, 'fsm'):
            self.fsm.request('walk')
        messenger.send('mailboxAsleep')
        base.localAvatar.forceGotoSleep()

    def exitMailbox(self):
        Place.Place.exitPurchase(self)
        base.localAvatar.stopSleepWatch()
        self.disablePeriodTimer()

    def enterTeleportIn(self, requestStatus):
        # Position the avatar at the house once the zone is fully loaded.
        self._etiToken = self.addSetZoneCompleteCallback(Functor(self._teleportToHouse, requestStatus))
        Place.Place.enterTeleportIn(self, requestStatus)

    def _teleportToHouse(self, requestStatus):
        try:
            houseDo = base.cr.doId2do.get(base.localAvatar.houseId)
            house = houseDo.house
            pos = house.getPos(render)
            base.localAvatar.detachNode()
            base.localAvatar.setPosHpr(house, 17, 3, 0, 125, 0, 0)
        except Exception:
            # House object not available yet (or no house): fall back to
            # the estate's default entry point.
            x, y, z, h, p, r = HouseGlobals.defaultEntryPoint
            base.localAvatar.detachNode()
            base.localAvatar.setPosHpr(render, x, y, z, h, p, r)
        base.localAvatar.setScale(1, 1, 1)
        self.toonSubmerged = -1
        self.notify.info('remove estate-check-toon-underwater to TaskMgr in enterTeleportIn()')
        taskMgr.remove('estate-check-toon-underwater')
        if base.wantPets:
            # First visit with a pet: run the pet tutorial after teleport-in.
            if base.localAvatar.hasPet() and not base.localAvatar.bPetTutorialDone:
                self.nextState = 'petTutorial'

    def teleportInDone(self):
        self.notify.debug('teleportInDone')
        self.toonSubmerged = -1
        # Bug fix: compare by equality, not identity -- "is not" against a
        # str literal relies on CPython interning and is not guaranteed.
        if self.nextState != 'petTutorial':
            self.notify.info('add estate-check-toon-underwater to TaskMgr in teleportInDone()')
            if hasattr(self, 'fsm'):
                taskMgr.add(self.__checkToonUnderwater, 'estate-check-toon-underwater')
        Place.Place.teleportInDone(self)

    def exitTeleportIn(self):
        self.removeSetZoneCompleteCallback(self._etiToken)
        Place.Place.exitTeleportIn(self)

    def enterTeleportOut(self, requestStatus):
        Place.Place.enterTeleportOut(self, requestStatus, self.__teleportOutDone)

    def __teleportOutDone(self, requestStatus):
        if hasattr(self, 'fsm'):
            self.fsm.requestFinalState()
        hoodId = requestStatus['hoodId']
        zoneId = requestStatus['zoneId']
        avId = requestStatus['avId']
        shardId = requestStatus['shardId']
        if hoodId == ToontownGlobals.MyEstate and zoneId == self.getZoneId() and shardId == None:
            # Teleporting within this same estate: just come straight back in.
            self.fsm.request('teleportIn', [requestStatus])
        elif hoodId == ToontownGlobals.MyEstate and shardId == None:
            # Going to a different estate on this shard.
            self.doneStatus = requestStatus
            self.getEstateZoneAndGoHome(requestStatus)
        else:
            self.doneStatus = requestStatus
            messenger.send(self.doneEvent, [self.doneStatus])
        return

    def goHomeFailed(self, task):
        self.notifyUserGoHomeFailed()
        self.ignore('setLocalEstateZone')
        # Fall back to teleporting into the current estate.
        self.doneStatus['avId'] = -1
        self.doneStatus['zoneId'] = self.getZoneId()
        self.fsm.request('teleportIn', [self.doneStatus])
        return Task.done

    def exitTeleportOut(self):
        Place.Place.exitTeleportOut(self)

    def exitDoorIn(self):
        self.toonSubmerged = -1
        self.notify.info('add estate-check-toon-underwater to TaskMgr in exitDoorIn()')
        if hasattr(self, 'fsm'):
            taskMgr.add(self.__checkToonUnderwater, 'estate-check-toon-underwater')
        Place.Place.exitDoorIn(self)

    def getZoneId(self):
        if self.zoneId:
            return self.zoneId
        else:
            self.notify.warning('no zone id available')

    def __checkCameraUnderwater(self, task):
        # Runs every frame; the submerge/emerge helpers are idempotent.
        if camera.getZ(render) < -1.2:
            self.__submergeCamera()
        else:
            self.__emergeCamera()
        return Task.cont

    def __checkToonUnderwater(self, task):
        if base.localAvatar.getZ() < -4.0:
            self.__submergeToon()
        else:
            self.__emergeToon()
        return Task.cont

    def __submergeCamera(self):
        if self.cameraSubmerged == 1:
            return
        self.__setUnderwaterFog()
        base.playSfx(self.loader.underwaterSound, looping=1, volume=0.8)
        self.cameraSubmerged = 1
        self.walkStateData.setSwimSoundAudible(1)

    def __emergeCamera(self):
        if self.cameraSubmerged == 0:
            return
        self.loader.underwaterSound.stop()
        self.loader.hood.sky.setFogOff()
        self.__setFaintFog()
        self.cameraSubmerged = 0
        self.walkStateData.setSwimSoundAudible(0)

    def forceUnderWater(self):
        # Reset the flag so __submergeToon's idempotence check passes.
        self.toonSubmerged = 0
        self.__submergeToon()

    def __submergeToon(self):
        if self.toonSubmerged == 1:
            return
        self.notify.debug('continuing in __submergeToon')
        if hasattr(self, 'loader') and self.loader:
            base.playSfx(self.loader.submergeSound)
        if base.config.GetBool('disable-flying-glitch') == 0:
            self.fsm.request('walk')
        self.walkStateData.fsm.request('swimming', [self.loader.swimSound])
        pos = base.localAvatar.getPos(render)
        base.localAvatar.d_playSplashEffect(pos[0], pos[1], -2.3)
        self.toonSubmerged = 1

    def __emergeToon(self):
        if self.toonSubmerged == 0:
            return
        self.notify.debug('continuing in __emergeToon')
        if hasattr(self, 'walkStateData'):
            self.walkStateData.fsm.request('walking')
        self.toonSubmerged = 0
        # The client April Toons Manager is currently broken, so we have to do this hacky thing instead. :(
        #if hasattr(base.cr, 'aprilToonsMgr'):
        #if self.isEventActive(AprilToonsGlobals.EventEstateGravity):
        #base.localAvatar.startAprilToonsControls()
        if base.config.GetBool('want-april-toons'):
            base.localAvatar.startAprilToonsControls()

    def __setUnderwaterFog(self):
        if base.wantFog:
            self.fog.setColor(Vec4(0.0, 0.0, 0.6, 1.0))
            self.fog.setLinearRange(0.1, 100.0)
            render.setFog(self.fog)
            self.loader.hood.sky.setFog(self.fog)

    def __setWhiteFog(self):
        if base.wantFog:
            self.fog.setColor(Vec4(0.8, 0.8, 0.8, 1.0))
            self.fog.setLinearRange(0.0, 400.0)
            render.setFog(self.fog)
            self.loader.hood.sky.setFog(self.fog)

    def __setFaintFog(self):
        if base.wantFog:
            self.fog.setColor(Vec4(0.8, 0.8, 0.8, 1.0))
            self.fog.setLinearRange(0.0, 700.0)
            render.setFog(self.fog)
| Spiderlover/Toontown | toontown/estate/Estate.py | Python | mit | 15,440 |
"""
Copyright (c) 2017-2022, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import Typed
from enamlnative.widgets.radio_button import ProxyRadioButton
from .android_compound_button import AndroidCompoundButton, CompoundButton
class RadioButton(CompoundButton):
__nativeclass__ = "android.widget.RadioButton"
class AndroidRadioButton(AndroidCompoundButton, ProxyRadioButton):
"""An Android implementation of an Enaml ProxyRadioButton."""
#: A reference to the widget created by the proxy.
widget = Typed(RadioButton)
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
"""Create the underlying widget."""
d = self.declaration
self.widget = RadioButton(
self.get_context(), None, d.style or "@attr/radioButtonStyle"
)
| codelv/enaml-native | src/enamlnative/android/android_radio_button.py | Python | mit | 1,092 |
#
# The MIT License (MIT)
#
# Copyright (c) 2014 William T. James
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from heartbeat import SwPriv,Merkle
# Map of scheme name -> heartbeat implementation class to be exercised
# by test_heartbeat below.
tests = {"Merkle": Merkle.Merkle,
         "SwPriv": SwPriv.SwPriv }
def test_heartbeat(heartbeat, n=10):
beat = heartbeat()
beat.gen()
public_beat = beat.get_public()
with open("tests/files/test7.txt", "rb") as f:
(tag, state) = beat.encode(f)
for i in range(n):
challenge = beat.gen_challenge(state)
with open("tests/files/test7.txt", "rb") as f:
proof = public_beat.prove(f, challenge, tag)
if (beat.verify(proof, challenge, state)):
print("passed test "+str(i))
else:
print("failed test "+str(i))
return False
return True
# Run the end-to-end correctness check for every registered scheme
# and report the result for each one.
for scheme_name in tests:
    ok = test_heartbeat(tests[scheme_name])
    if ok:
        print(scheme_name+" seems correct.")
    else:
        print(scheme_name+" is incorrect.")
| Storj/heartbeat | tests/correctness.py | Python | mit | 1,941 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2
short_description: create, terminate, start or stop an instance in ec2
description:
- Creates or terminates ec2 instances.
- C(state=restarted) was added in 2.2
version_added: "0.9"
options:
key_name:
description:
- key pair to use on the instance
required: false
default: null
aliases: ['keypair']
id:
version_added: "1.1"
description:
- identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html).
required: false
default: null
aliases: []
group:
description:
- security group (or list of groups) to use with the instance
required: false
default: null
aliases: [ 'groups' ]
group_id:
version_added: "1.1"
description:
- security group id (or list of ids) to use with the instance
required: false
default: null
aliases: []
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
zone:
version_added: "1.2"
description:
- AWS availability zone in which to launch the instance
required: false
default: null
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
choices: [ "default", "dedicated" ]
aliases: []
spot_price:
version_added: "1.5"
description:
- Maximum spot price to bid, If not set a regular on-demand instance is requested. A spot request is made with this maximum bid. When it is filled, the instance is started.
required: false
default: null
aliases: []
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
required: false
default: "one-time"
choices: [ "one-time", "persistent" ]
aliases: []
image:
description:
- I(ami) ID to use for the instance
required: true
default: null
aliases: []
kernel:
description:
- kernel I(eki) to use for the instance
required: false
default: null
aliases: []
ramdisk:
description:
- ramdisk I(eri) to use for the instance
required: false
default: null
aliases: []
wait:
description:
- wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
spot_wait_timeout:
version_added: "1.5"
description:
- how long to wait for the spot instance request to be fulfilled
default: 600
aliases: []
count:
description:
- number of instances to launch
required: False
default: 1
aliases: []
monitoring:
version_added: "1.1"
description:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
user_data:
version_added: "0.9"
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
instance_tags:
version_added: "1.0"
description:
- a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
aliases: []
placement_group:
version_added: "1.3"
description:
- placement group for the instance when using EC2 Clustered Compute
required: false
default: null
aliases: []
vpc_subnet_id:
version_added: "1.1"
description:
- the subnet ID in which to launch the instance (VPC)
required: false
default: null
aliases: []
assign_public_ip:
version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
private_ip:
version_added: "1.2"
description:
- the private ip address to assign the instance (from the vpc subnet)
required: false
default: null
aliases: []
instance_profile_name:
version_added: "1.3"
description:
- Name of the IAM instance profile to use. Boto library must be 2.5.0+
required: false
default: null
aliases: []
instance_ids:
version_added: "1.3"
description:
- "list of instance ids, currently used for states: absent, running, stopped"
required: false
default: null
aliases: ['instance_id']
source_dest_check:
version_added: "1.6"
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: yes
choices: [ "yes", "no" ]
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
required: false
default: no
choices: [ "yes", "no" ]
instance_initiated_shutdown_behavior:
version_added: "2.2"
description:
- Set whether AWS will Stop or Terminate an instance on shutdown
required: false
default: 'stop'
choices: [ "stop", "terminate" ]
state:
version_added: "1.3"
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
choices: ['present', 'absent', 'running', 'restarted', 'stopped']
volumes:
version_added: "1.5"
description:
- a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false
default: null
aliases: []
ebs_optimized:
version_added: "1.6"
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: 'false'
exact_count:
version_added: "1.5"
description:
- An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value.
required: false
default: null
aliases: []
count_tag:
version_added: "1.5"
description:
- Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". The specified tag must already exist or be passed in as the 'instance_tags' option.
required: false
default: null
aliases: []
network_interfaces:
version_added: "2.0"
description:
- A list of existing network interfaces to attach to the instance at launch. When specifying existing network interfaces, none of the assign_public_ip, private_ip, vpc_subnet_id, group, or group_id parameters may be used. (Those parameters are for creating a new network interface at launch.)
required: false
default: null
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
required: false
default: null
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2:
key_name: mykey
instance_type: t2.micro
image: ami-123456
wait: yes
group: webserver
count: 3
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Advanced example with tagging and CloudWatch
- ec2:
key_name: mykey
group: databases
instance_type: t2.micro
image: ami-123456
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with additional IOPS volume from snapshot and volume delete on termination
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
exact_count: 1
# Multiple groups example
- ec2:
key_name: mykey
group: ['databases', 'internal-services', 'sshable', 'and-so-forth']
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
instance_tags:
db: postgres
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Multiple instances with additional volume from snapshot
- ec2:
key_name: mykey
group: webserver
instance_type: m1.large
image: ami-6e649707
wait: yes
wait_timeout: 500
count: 5
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
volume_size: 10
monitoring: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
spot_wait_timeout: 600
keypair: mykey
group_id: sg-1dc53f72
instance_type: m1.small
image: ami-6e649707
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators
# Examples using pre-existing network interfaces
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interface: eni-deadbeef
- ec2:
key_name: mykey
instance_type: t2.small
image: ami-f005ba11
network_interfaces: ['eni-deadbeef', 'eni-5ca1ab1e']
# Launch instances, runs some tasks
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
gather_facts: False
vars:
key_name: my_keypair
instance_type: m1.small
security_group: my_securitygroup
image: my_ami_id
region: us-east-1
tasks:
- name: Launch instance
ec2:
key_name: "{{ keypair }}"
group: "{{ security_group }}"
instance_type: "{{ instance_type }}"
image: "{{ image }}"
wait: true
region: "{{ region }}"
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
register: ec2
- name: Add new instance to host group
add_host: hostname={{ item.public_ip }} groupname=launched
with_items: '{{ec2.instances}}'
- name: Wait for SSH to come up
wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started
with_items: '{{ec2.instances}}'
- name: Configure instance(s)
hosts: launched
become: True
gather_facts: True
roles:
- my_awesome_role
- my_awesome_test
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
ec2:
state: 'absent'
instance_ids: '{{ ec2.instance_ids }}'
# Start a few existing instances, run some tasks
# and stop the instances
- name: Start sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Start the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: running
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
roles:
- do_neat_stuff
- do_more_neat_stuff
- name: Stop sandbox instances
hosts: localhost
gather_facts: false
connection: local
vars:
instance_ids:
- 'i-xxxxxx'
- 'i-xxxxxx'
- 'i-xxxxxx'
region: us-east-1
tasks:
- name: Stop the sandbox instances
ec2:
instance_ids: '{{ instance_ids }}'
region: '{{ region }}'
state: stopped
wait: True
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Start stopped instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: running
#
# Restart instances specified by tag
#
- local_action:
module: ec2
instance_tags:
Name: ExtraPower
state: restarted
#
# Enforce that 5 instances with a tag "foo" are running
# (Highly recommended!)
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
foo: bar
exact_count: 5
count_tag: foo
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# Enforce that 5 running instances named "database" with a "dbtype" of "postgres"
#
- ec2:
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
wait: yes
group: webserver
instance_tags:
Name: database
dbtype: postgres
exact_count: 5
count_tag:
Name: database
dbtype: postgres
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
#
# count_tag complex argument examples
#
# instances with tag foo
count_tag:
foo:
# instances with tag foo=bar
count_tag:
foo: bar
# instances with tags foo=bar & baz
count_tag:
foo: bar
baz:
# instances with tags foo & bar & baz=bang
count_tag:
- foo
- bar
- baz: bang
'''
import time
from ast import literal_eval
try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto.vpc import VPCConnection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None):
    """Return (reservations, instances) for running instances whose tags
    match ``count_tag``, optionally restricted to one availability zone."""
    # get reservations for instances that match tag(s) and are running
    reservations = get_reservations(module, ec2, tags=count_tag, state="running", zone=zone)

    # Flatten the reservations into a single list of instance objects.
    instances = [inst
                 for res in reservations if hasattr(res, 'instances')
                 for inst in res.instances]

    return reservations, instances
def _set_none_to_blank(dictionary):
result = dictionary
for k in result.iterkeys():
if type(result[k]) == dict:
result[k] = _set_none_to_blank(result[k])
elif not result[k]:
result[k] = ""
return result
def get_reservations(module, ec2, tags=None, state=None, zone=None):
    """Fetch EC2 reservations matching the given tag/state/zone criteria.

    ``tags`` may be a plain tag name (string), a list of names and/or
    {name: value} dicts, or a {name: value} dict; falsy values mean
    "tag exists with any value".  Returns whatever
    ``ec2.get_all_instances(filters=...)`` returns.

    Fixes: ``iteritems()`` was Python-2 only, and the bare ``except:``
    around literal_eval swallowed every exception (now narrowed to the
    errors literal_eval actually raises for malformed input).
    """
    # TODO: filters do not work with tags that have underscores
    filters = dict()

    if tags is not None:
        if isinstance(tags, str):
            # The playbook may hand us a serialised list/dict; try to
            # decode it, otherwise treat it as a plain tag name.
            try:
                tags = literal_eval(tags)
            except (ValueError, SyntaxError):
                pass

        # if string, we only care that a tag of that name exists
        if isinstance(tags, str):
            filters.update({"tag-key": tags})

        # if list, append each item to filters
        if isinstance(tags, list):
            for x in tags:
                if isinstance(x, dict):
                    x = _set_none_to_blank(x)
                    filters.update(dict(("tag:"+tn, tv) for (tn, tv) in x.items()))
                else:
                    filters.update({"tag-key": x})

        # if dict, add the key and value to the filter
        if isinstance(tags, dict):
            tags = _set_none_to_blank(tags)
            filters.update(dict(("tag:"+tn, tv) for (tn, tv) in tags.items()))

    if state:
        # http://stackoverflow.com/questions/437511/what-are-the-valid-instancestates-for-the-amazon-ec2-api
        filters.update({'instance-state-name': state})

    if zone:
        filters.update({'availability-zone': zone})

    results = ec2.get_all_instances(filters=filters)
    return results
def get_instance_info(inst):
    """
    Retrieves instance information from a boto Instance object
    and returns it as a dictionary.

    The attributes handled via getattr defaults below may be absent on
    older boto versions, so we fall back to a sensible default instead
    of letting AttributeError propagate (idiomatic replacement of the
    original try/except-AttributeError boilerplate).
    """
    instance_info = {'id': inst.id,
                     'ami_launch_index': inst.ami_launch_index,
                     'private_ip': inst.private_ip_address,
                     'private_dns_name': inst.private_dns_name,
                     'public_ip': inst.ip_address,
                     'dns_name': inst.dns_name,
                     'public_dns_name': inst.public_dns_name,
                     'state_code': inst.state_code,
                     'architecture': inst.architecture,
                     'image_id': inst.image_id,
                     'key_name': inst.key_name,
                     'placement': inst.placement,
                     # placement is an AZ like "us-east-1a"; dropping the
                     # final letter yields the region name.
                     'region': inst.placement[:-1],
                     'kernel': inst.kernel,
                     'ramdisk': inst.ramdisk,
                     'launch_time': inst.launch_time,
                     'instance_type': inst.instance_type,
                     'root_device_type': inst.root_device_type,
                     'root_device_name': inst.root_device_name,
                     'state': inst.state,
                     'hypervisor': inst.hypervisor,
                     'tags': inst.tags,
                     'groups': dict((group.id, group.name) for group in inst.groups),
                     }

    instance_info['virtualization_type'] = getattr(inst, 'virtualization_type', None)
    instance_info['ebs_optimized'] = getattr(inst, 'ebs_optimized', False)

    bdm = getattr(inst, 'block_device_mapping', None)
    if bdm is None:
        # Mirrors the original behaviour: missing mapping reports False.
        instance_info['block_device_mapping'] = False
    else:
        bdm_dict = {}
        for device_name in bdm.keys():
            bdm_dict[device_name] = {
                'status': bdm[device_name].status,
                'volume_id': bdm[device_name].volume_id,
                'delete_on_termination': bdm[device_name].delete_on_termination
            }
        instance_info['block_device_mapping'] = bdm_dict

    instance_info['tenancy'] = getattr(inst, 'placement_tenancy', 'default')

    return instance_info
def boto_supports_associate_public_ip_address(ec2):
    """
    Check if Boto library has associate_public_ip_address in the NetworkInterfaceSpecification
    class. Added in Boto 2.13.0

    ec2: authenticated ec2 connection object

    Returns:
        True if Boto library accepts associate_public_ip_address argument, else false
    """
    try:
        # Probe a freshly constructed spec object for the attribute.
        probe = boto.ec2.networkinterface.NetworkInterfaceSpecification()
        return hasattr(probe, "associate_public_ip_address")
    except AttributeError:
        return False
def boto_supports_profile_name_arg(ec2):
    """
    Check if Boto library has instance_profile_name argument. instance_profile_name has been added in Boto 2.5.0

    ec2: authenticated ec2 connection object

    Returns:
        True if Boto library accept instance_profile_name argument, else false
    """
    run_instances_method = getattr(ec2, 'run_instances')
    # func_code is a Python-2-only alias; __code__ exists on 2.6+ and 3.x.
    return 'instance_profile_name' in run_instances_method.__code__.co_varnames
def create_block_device(module, ec2, volume):
    """Translate one entry of the 'volumes' option into a boto BlockDeviceType.

    Fails the module (via module.fail_json, which does not return) on
    invalid combinations: both device_type and volume_type set, missing
    size for a brand-new volume, io1 snapshot without iops, iops/size
    ratio exceeded, encryption together with a snapshot, or ephemeral
    combined with snapshot.
    """
    # Not aware of a way to determine this programatically
    # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
    MAX_IOPS_TO_SIZE_RATIO = 30

    # device_type has been used historically to represent volume_type,
    # however ec2_vol uses volume_type, as does the BlockDeviceType, so
    # we add handling for either/or but not both
    if all(key in volume for key in ['device_type','volume_type']):
        module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')

    # get whichever one is set, or NoneType if neither are set
    volume_type = volume.get('device_type') or volume.get('volume_type')

    # A volume built neither from a snapshot nor as ephemeral storage is
    # brand new, so it must carry an explicit size.
    if 'snapshot' not in volume and 'ephemeral' not in volume:
        if 'volume_size' not in volume:
            module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
    if 'snapshot' in volume:
        if volume_type == 'io1' and 'iops' not in volume:
            module.fail_json(msg = 'io1 volumes must have an iops value set')
        if 'iops' in volume:
            # Validate the requested IOPS against the explicit size, or
            # the snapshot's size when no size was given.
            snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
            size = volume.get('volume_size', snapshot.volume_size)
            if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size:
                module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO)
        if 'encrypted' in volume:
            # NOTE(review): this rejects the mere presence of the key,
            # even 'encrypted: false' — confirm whether that is intended.
            module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot')
    if 'ephemeral' in volume:
        if 'snapshot' in volume:
            module.fail_json(msg = 'Cannot set both ephemeral and snapshot')
    return BlockDeviceType(snapshot_id=volume.get('snapshot'),
                           ephemeral_name=volume.get('ephemeral'),
                           size=volume.get('volume_size'),
                           volume_type=volume_type,
                           delete_on_termination=volume.get('delete_on_termination', False),
                           iops=volume.get('iops'),
                           encrypted=volume.get('encrypted', None))
def boto_supports_param_in_spot_request(ec2, param):
    """
    Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.

    ec2: authenticated ec2 connection object

    Returns:
        True if boto library has the named param as an argument on the request_spot_instances method, else False
    """
    method = getattr(ec2, 'request_spot_instances')
    # func_code is a Python-2-only alias; __code__ exists on 2.6+ and 3.x.
    return param in method.__code__.co_varnames
def await_spot_requests(module, ec2, spot_requests, count):
    """
    Wait for a group of spot requests to be fulfilled, or fail.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    spot_requests: boto.ec2.spotinstancerequest.SpotInstanceRequest object returned by ec2.request_spot_instances
    count: Total number of instances to be created by the spot requests

    Returns:
        list of instance ID's created by the spot request(s)
    """
    spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
    wait_complete = time.time() + spot_wait_timeout

    # Map of our request id -> fulfilled instance id, filled in as AWS
    # assigns instances to the requests.
    spot_req_inst_ids = dict()
    while time.time() < wait_complete:
        # Re-poll the full request list every iteration; request state
        # changes server-side.
        reqs = ec2.get_all_spot_instance_requests()
        for sirb in spot_requests:
            if sirb.id in spot_req_inst_ids:
                continue
            for sir in reqs:
                if sir.id != sirb.id:
                    continue # this is not our spot instance
                if sir.instance_id is not None:
                    spot_req_inst_ids[sirb.id] = sir.instance_id
                elif sir.state == 'open':
                    continue # still waiting, nothing to do here
                elif sir.state == 'active':
                    continue # Instance is created already, nothing to do here
                elif sir.state == 'failed':
                    # fail_json exits the module; no return after this.
                    module.fail_json(msg="Spot instance request %s failed with status %s and fault %s:%s" % (
                        sir.id, sir.status.code, sir.fault.code, sir.fault.message))
                elif sir.state == 'cancelled':
                    module.fail_json(msg="Spot instance request %s was cancelled before it could be fulfilled." % sir.id)
                elif sir.state == 'closed':
                    # instance is terminating or marked for termination
                    # this may be intentional on the part of the operator,
                    # or it may have been terminated by AWS due to capacity,
                    # price, or group constraints in this case, we'll fail
                    # the module if the reason for the state is anything
                    # other than termination by user. Codes are documented at
                    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html
                    if sir.status.code == 'instance-terminated-by-user':
                        # do nothing, since the user likely did this on purpose
                        pass
                    else:
                        spot_msg = "Spot instance request %s was closed by AWS with the status %s and fault %s:%s"
                        module.fail_json(msg=spot_msg % (sir.id, sir.status.code, sir.fault.code, sir.fault.message))
        if len(spot_req_inst_ids) < count:
            # Not everything fulfilled yet; back off before re-polling.
            time.sleep(5)
        else:
            return spot_req_inst_ids.values()
    module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
def enforce_count(module, ec2, vpc):
    """Converge the number of running instances matching count_tag to
    exact_count, creating or terminating instances as needed.

    Returns (all_instances, changed_instance_dicts, changed_instance_ids,
    changed), where all_instances are dictionaries describing every
    matching instance after convergence.
    """
    exact_count = module.params.get('exact_count')
    count_tag = module.params.get('count_tag')
    zone = module.params.get('zone')

    # fail here if the exact count was specified without filtering
    # on a tag, as this may lead to a undesired removal of instances
    if exact_count and count_tag is None:
        module.fail_json(msg="you must use the 'count_tag' option with exact_count")

    reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone)

    changed = None
    # NOTE(review): checkmode is hard-coded False here — check-mode
    # support appears unimplemented in this function; confirm.
    checkmode = False
    instance_dict_array = []
    changed_instance_ids = None

    if len(instances) == exact_count:
        changed = False
    elif len(instances) < exact_count:
        # Deficit: launch only the missing number of instances.
        changed = True
        to_create = exact_count - len(instances)
        if not checkmode:
            (instance_dict_array, changed_instance_ids, changed) \
                = create_instances(module, ec2, vpc, override_count=to_create)

            for inst in instance_dict_array:
                instances.append(inst)
    elif len(instances) > exact_count:
        # Surplus: terminate the excess, picking the lowest-sorted ids.
        changed = True
        to_remove = len(instances) - exact_count
        if not checkmode:
            all_instance_ids = sorted([ x.id for x in instances ])
            remove_ids = all_instance_ids[0:to_remove]

            instances = [ x for x in instances if x.id not in remove_ids]

            (changed, instance_dict_array, changed_instance_ids) \
                = terminate_instances(module, ec2, remove_ids)
            terminated_list = []
            for inst in instance_dict_array:
                inst['state'] = "terminated"
                terminated_list.append(inst)
            instance_dict_array = terminated_list

    # ensure all instances are dictionaries
    all_instances = []
    for inst in instances:
        if type(inst) is not dict:
            inst = get_instance_info(inst)
        all_instances.append(inst)

    return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
module : AnsibleModule object
ec2: authenticated ec2 connection object
Returns:
A list of dictionaries with instance information
about the instances that were launched
"""
key_name = module.params.get('key_name')
id = module.params.get('id')
group_name = module.params.get('group')
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
else:
count = module.params.get('count')
monitoring = module.params.get('monitoring')
kernel = module.params.get('kernel')
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
vpc_subnet_id = module.params.get('vpc_subnet_id')
assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')
instance_initiated_shutdown_behavior = module.params.get('instance_initiated_shutdown_behavior')
# group_id and group_name are exclusive of each other
if group_id and group_name:
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
vpc_id = None
if vpc_subnet_id:
if not vpc:
module.fail_json(msg="region must be specified")
else:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, basestring):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
if isinstance(group_id, basestring):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
# Lookup any instances that much our run id.
running_instances = []
count_remaining = int(count)
if id != None:
filter_dict = {'client-token':id, 'instance-state-name' : 'running'}
previous_reservations = ec2.get_all_instances(None, filter_dict)
for res in previous_reservations:
for prev_instance in res.instances:
running_instances.append(prev_instance)
count_remaining = count_remaining - len(running_instances)
# Both min_count and max_count equal count parameter. This means the launch request is explicit (we want count, or fail) in how many instances we want.
if count_remaining == 0:
changed = False
else:
changed = True
try:
params = {'image_id': image,
'key_name': key_name,
'monitoring_enabled': monitoring,
'placement': zone,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
'user_data': user_data}
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
if instance_profile_name is not None:
module.fail_json(
msg="instance_profile_name parameter requires Boto version 2.5.0 or higher")
if assign_public_ip:
if not boto_supports_associate_public_ip_address(ec2):
module.fail_json(
msg="assign_public_ip parameter requires Boto version 2.13.0 or higher.")
elif not vpc_subnet_id:
module.fail_json(
msg="assign_public_ip only available with vpc_subnet_id")
else:
if private_ip:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
private_ip_address=private_ip,
groups=group_id,
associate_public_ip_address=assign_public_ip)
else:
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
subnet_id=vpc_subnet_id,
groups=group_id,
associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
params['network_interfaces'] = interfaces
else:
if network_interfaces:
if isinstance(network_interfaces, basestring):
network_interfaces = [network_interfaces]
interfaces = []
for i, network_interface_id in enumerate(network_interfaces):
interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
network_interface_id=network_interface_id,
device_index=i)
interfaces.append(interface)
params['network_interfaces'] = \
boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces)
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
params['security_group_ids'] = group_id
else:
params['security_groups'] = group_name
if volumes:
bdm = BlockDeviceMapping()
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg = 'Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, ec2, volume)
params['block_device_map'] = bdm
# check to see if we're using spot pricing first before starting instances
if not spot_price:
if assign_public_ip and private_ip:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
))
else:
params.update(dict(
min_count = count_remaining,
max_count = count_remaining,
client_token = id,
placement_group = placement_group,
private_ip_address = private_ip,
))
# For ordinary (not spot) instances, we can select 'stop'
# (the default) or 'terminate' here.
params['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior or 'stop'
res = ec2.run_instances(**params)
instids = [ i.id for i in res.instances ]
while True:
try:
ec2.get_all_instances(instids)
break
except boto.exception.EC2ResponseError as e:
if "<Code>InvalidInstanceID.NotFound</Code>" in str(e):
# there's a race between start and get an instance
continue
else:
module.fail_json(msg = str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group :
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")
# You can't tell spot instances to 'stop'; they will always be
# 'terminate'd. For convenience, we'll ignore the latter value.
if instance_initiated_shutdown_behavior and instance_initiated_shutdown_behavior != 'terminate':
module.fail_json(
msg="instance_initiated_shutdown_behavior=stop is not supported for spot instances.")
if spot_launch_group and isinstance(spot_launch_group, basestring):
params['launch_group'] = spot_launch_group
params.update(dict(
count = count_remaining,
type = spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)
# Now we have to do the intermediate waiting
if wait:
instids = await_spot_requests(module, ec2, res, count)
except boto.exception.BotoServerError as e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
try:
res_list = ec2.get_all_instances(instids)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidInstanceID.NotFound':
time.sleep(1)
continue
else:
raise
num_running = 0
for res in res_list:
num_running += len([ i for i in res.instances if i.state=='running' ])
if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
continue
if wait and num_running < len(instids):
time.sleep(5)
else:
break
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
#We do this after the loop ends so that we end up with one list
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by AWS
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Disabled by default by AWS
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError as e:
module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
return (instance_dict_array, created_instance_ids, changed)
def terminate_instances(module, ec2, instance_ids):
    """
    Terminate a list of instances.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: a list of instance ids to terminate in the form of
      [ {id: <inst-id>}, ..]

    Returns a tuple (changed, instance_dict_array, terminated_instance_ids)
    describing the instances terminated.  "changed" stays False when no
    instance was in a terminable ('running' or 'stopped') state.
    """
    # Whether to wait for termination to complete before returning
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    changed = False
    instance_dict_array = []

    # instance_ids is mandatory here — unlike start/stop there is no
    # tag-based selection fallback for termination.
    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        module.fail_json(msg='instance_ids should be a list of instances, aborting')

    terminated_instance_ids = []
    for res in ec2.get_all_instances(instance_ids):
        for inst in res.instances:
            # Only 'running' or 'stopped' instances can be terminated; others
            # (e.g. already terminated) are silently skipped.
            if inst.state == 'running' or inst.state == 'stopped':
                terminated_instance_ids.append(inst.id)
                instance_dict_array.append(get_instance_info(inst))
                try:
                    ec2.terminate_instances([inst.id])
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to terminate instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    # wait here until the instances are 'terminated'
    if wait:
        num_terminated = 0
        # wait_timeout is reused as an absolute deadline from here on
        wait_timeout = time.time() + wait_timeout
        while wait_timeout > time.time() and num_terminated < len(terminated_instance_ids):
            response = ec2.get_all_instances( \
                instance_ids=terminated_instance_ids, \
                filters={'instance-state-name':'terminated'})
            try:
                num_terminated = sum([len(res.instances) for res in response])
            except Exception as e:
                # got a bad response of some sort, possibly due to
                # stale/cached data. Wait a second and then try again
                time.sleep(1)
                continue

            if num_terminated < len(terminated_instance_ids):
                time.sleep(5)

        # waiting took too long
        if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
            module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())

        # Lets get the current state of the instances after terminating - issue600
        # (re-query so callers see post-termination attributes, not the
        # snapshot taken before terminate was issued)
        instance_dict_array = []
        for res in ec2.get_all_instances(instance_ids=terminated_instance_ids,\
                                         filters={'instance-state-name':'terminated'}):
            for inst in res.instances:
                instance_dict_array.append(get_instance_info(inst))

    return (changed, instance_dict_array, terminated_instance_ids)
def startstop_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Starts or stops a list of existing instances.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
      [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
      {key: value, ... }
    state: Intended state ("running" or "stopped")

    Returns a tuple (changed, instance_dict_array, instance_ids) with
    information about the instances started/stopped.  If no instance had
    to change state, "changed" will be False.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two.
    """
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    # Check that our instances are not in the state we want to take
    # Check (and eventually change) instances attributes and instances state
    existing_instances_array = []
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:
            # Check "source_dest_check" attribute
            try:
                if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
                    inst.modify_attribute('sourceDestCheck', source_dest_check)
                    changed = True
            except boto.exception.EC2ResponseError as exc:
                # instances with more than one Elastic Network Interface will
                # fail, because they have the sourceDestCheck attribute defined
                # per-interface
                if exc.code == 'InvalidInstanceID':
                    for interface in inst.interfaces:
                        if interface.source_dest_check != source_dest_check:
                            ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
                            changed = True
                else:
                    module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                     exception=traceback.format_exc(exc))

            # Check "termination_protection" attribute
            if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
                    and termination_protection is not None):
                inst.modify_attribute('disableApiTermination', termination_protection)
                changed = True

            # Check instance state
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    if state == 'running':
                        inst.start()
                    else:
                        inst.stop()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True
            existing_instances_array.append(inst.id)

    # Merge tag-matched instances with any explicitly supplied ids
    instance_ids = list(set(existing_instances_array + (instance_ids or [])))

    ## Wait for all the instances to finish starting or stopping
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time():
        instance_dict_array = []
        matched_instances = []
        for res in ec2.get_all_instances(instance_ids):
            for i in res.instances:
                if i.state == state:
                    instance_dict_array.append(get_instance_info(i))
                    matched_instances.append(i)
        if len(matched_instances) < len(instance_ids):
            time.sleep(5)
        else:
            break

    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())

    return (changed, instance_dict_array, instance_ids)
def restart_instances(module, ec2, instance_ids, state, instance_tags):
    """
    Restarts a list of existing instances.

    module: Ansible module object
    ec2: authenticated ec2 connection object
    instance_ids: The list of instances to start in the form of
      [ {id: <inst-id>}, ..]
    instance_tags: A dict of tag keys and values in the form of
      {key: value, ... }
    state: Intended state ("restarted")

    Returns a tuple (changed, instance_dict_array, instance_ids) with
    information about the instances.  If no instance had to change state,
    "changed" will be False.

    Wait will not apply here as this is an OS level operation.

    Note that if instance_ids and instance_tags are both non-empty,
    this method will process the intersection of the two.
    """
    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')
    changed = False
    instance_dict_array = []

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
        if not instance_tags:
            module.fail_json(msg='instance_ids should be a list of instances, aborting')

    # To make an EC2 tag filter, we need to prepend 'tag:' to each key.
    # An empty filter does no filtering, so it's safe to pass it to the
    # get_all_instances method even if the user did not specify instance_tags
    filters = {}
    if instance_tags:
        for key, value in instance_tags.items():
            filters["tag:" + key] = value

    # Check that our instances are not in the state we want to take
    # Check (and eventually change) instances attributes and instances state
    for res in ec2.get_all_instances(instance_ids, filters=filters):
        for inst in res.instances:
            # Check "source_dest_check" attribute
            try:
                if inst.vpc_id is not None and inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
                    inst.modify_attribute('sourceDestCheck', source_dest_check)
                    changed = True
            except boto.exception.EC2ResponseError as exc:
                # instances with more than one Elastic Network Interface will
                # fail, because they have the sourceDestCheck attribute defined
                # per-interface
                if exc.code == 'InvalidInstanceID':
                    for interface in inst.interfaces:
                        if interface.source_dest_check != source_dest_check:
                            ec2.modify_network_interface_attribute(interface.id, "sourceDestCheck", source_dest_check)
                            changed = True
                else:
                    module.fail_json(msg='Failed to handle source_dest_check state for instance {0}, error: {1}'.format(inst.id, exc),
                                     exception=traceback.format_exc(exc))

            # Check "termination_protection" attribute
            if (inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection
                    and termination_protection is not None):
                inst.modify_attribute('disableApiTermination', termination_protection)
                changed = True

            # Check instance state; reboot anything not already in the
            # intended ("restarted") pseudo-state
            if inst.state != state:
                instance_dict_array.append(get_instance_info(inst))
                try:
                    inst.reboot()
                except EC2ResponseError as e:
                    module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
                changed = True

    return (changed, instance_dict_array, instance_ids)
def main():
    """Entry point of the ec2 module: parse arguments and dispatch on state.

    Dispatches to terminate_instances / startstop_instances /
    restart_instances / create_instances / enforce_count and exits the
    module with the aggregated result.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        key_name = dict(aliases = ['keypair']),
        id = dict(),
        group = dict(type='list', aliases=['groups']),
        group_id = dict(type='list'),
        zone = dict(aliases=['aws_zone', 'ec2_zone']),
        instance_type = dict(aliases=['type']),
        spot_price = dict(),
        spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
        spot_launch_group = dict(),
        image = dict(),
        kernel = dict(),
        # FIX: default was the string '1' for a type='int' option; use an int
        count = dict(type='int', default=1),
        monitoring = dict(type='bool', default=False),
        ramdisk = dict(),
        wait = dict(type='bool', default=False),
        wait_timeout = dict(default=300),
        spot_wait_timeout = dict(default=600),
        placement_group = dict(),
        user_data = dict(),
        instance_tags = dict(type='dict'),
        vpc_subnet_id = dict(),
        assign_public_ip = dict(type='bool', default=False),
        private_ip = dict(),
        instance_profile_name = dict(),
        instance_ids = dict(type='list', aliases=['instance_id']),
        source_dest_check = dict(type='bool', default=True),
        termination_protection = dict(type='bool', default=None),
        state = dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
        instance_initiated_shutdown_behavior=dict(default=None, choices=['stop', 'terminate']),
        exact_count = dict(type='int', default=None),
        count_tag = dict(),
        volumes = dict(type='list'),
        ebs_optimized = dict(type='bool', default=False),
        tenancy = dict(default='default'),
        network_interfaces = dict(type='list', aliases=['network_interface'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive = [
            ['exact_count', 'count'],
            ['exact_count', 'state'],
            ['exact_count', 'instance_ids'],
            ['network_interfaces', 'assign_public_ip'],
            ['network_interfaces', 'group'],
            ['network_interfaces', 'group_id'],
            ['network_interfaces', 'private_ip'],
            ['network_interfaces', 'vpc_subnet_id'],
        ],
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    ec2 = ec2_connect(module)

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    if region:
        try:
            vpc = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg = str(e))
    else:
        vpc = None

    tagged_instances = []

    state = module.params['state']

    if state == 'absent':
        instance_ids = module.params['instance_ids']
        if not instance_ids:
            module.fail_json(msg='instance_ids list is required for absent state')

        (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)

    elif state in ('running', 'stopped'):
        instance_ids = module.params.get('instance_ids')
        instance_tags = module.params.get('instance_tags')
        if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
            module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)

        (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)

    # FIX: was `state in ('restarted')` — `('restarted')` is a plain string
    # (missing comma), so `in` performed a substring test and only worked by
    # accident; compare for equality instead.
    elif state == 'restarted':
        instance_ids = module.params.get('instance_ids')
        instance_tags = module.params.get('instance_tags')
        if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
            module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)

        (changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)

    elif state == 'present':
        # Changed is always set to true when provisioning new instances
        if not module.params.get('image'):
            module.fail_json(msg='image parameter is required for new instance')

        if module.params.get('exact_count') is None:
            (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
        else:
            (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)

    module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| trondhindenes/ansible-modules-core | cloud/amazon/ec2.py | Python | gpl-3.0 | 61,264 |
"""Init
Revision ID: 4feb484b3fd4
Revises: None
Create Date: 2014-06-20 21:57:46.271797
"""
# revision identifiers, used by Alembic.
revision = '4feb484b3fd4'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema: inboxes, contacts and mails tables.

    mails.inboxId references inboxes.id; contacts is standalone.
    Secondary indexes are added on frequently-searched string columns.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # Per-account mailbox configuration (IMAP/SMTP endpoints + UI settings)
    op.create_table('inboxes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('userMail', sa.String(length=255), nullable=True),
        sa.Column('account', sa.String(length=255), nullable=True),
        # NOTE(review): password is stored as a plain String column and even
        # indexed below — presumably plaintext; verify whether it should be
        # hashed/encrypted instead.
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('firstName', sa.String(length=255), nullable=True),
        sa.Column('lastName', sa.String(length=255), nullable=True),
        sa.Column('imapServer', sa.String(length=255), nullable=True),
        sa.Column('smtpServer', sa.String(length=255), nullable=True),
        sa.Column('imapPort', sa.String(length=255), nullable=True),
        sa.Column('smtpPort', sa.String(length=255), nullable=True),
        sa.Column('imapSSL', sa.Integer(), nullable=True),
        sa.Column('smtpSSL', sa.Integer(), nullable=True),
        sa.Column('smtpAuth', sa.Integer(), nullable=True),
        sa.Column('nbr_mails', sa.Integer(), nullable=True),
        sa.Column('nbr_addresses', sa.Integer(), nullable=True),
        sa.Column('colourblind_mode', sa.Integer(), nullable=True),
        sa.Column('font_size', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_inboxes_password', 'inboxes', ['password'], unique=False)

    # Address-book entries
    op.create_table('contacts',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('emailAddress', sa.String(length=255), nullable=True),
        sa.Column('picture', sa.String(length=255), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_contacts_name', 'contacts', ['name'], unique=False)

    # Individual messages, linked to their owning inbox
    op.create_table('mails',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('remoteID', sa.String(length=255),nullable=True),
        sa.Column('date', sa.String(length=255), nullable=True),
        sa.Column('subject', sa.String(length=255), nullable=True),
        sa.Column('_from', sa.String(length=255), nullable=True),
        sa.Column('to', sa.String(length=255), nullable=True),
        sa.Column('cc', sa.Text(), nullable=True),
        sa.Column('bcc', sa.Text(), nullable=True),
        sa.Column('inReplyTo', sa.String(length=255), nullable=True),
        sa.Column('message', sa.String(length=255), nullable=True),
        sa.Column('inboxId', sa.Integer(), nullable=True),
        sa.Column('read', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['inboxId'], ['inboxes.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index('ix_mails_inReplyTo', 'mails', ['inReplyTo'], unique=False)
    op.create_index('ix_mails_message', 'mails', ['message'], unique=False)
    op.create_index('ix_mails_subject', 'mails', ['subject'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(), in reverse dependency order
    (mails first, since it holds a foreign key to inboxes)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_mails_subject', 'mails')
    op.drop_index('ix_mails_message', 'mails')
    op.drop_index('ix_mails_inReplyTo', 'mails')
    op.drop_table('mails')
    # FIX: upgrade() creates 'ix_contacts_name' (there is no
    # 'ix_contacts_lastName' index), so drop the index that actually exists;
    # the old name made downgrade fail.
    op.drop_index('ix_contacts_name', 'contacts')
    op.drop_table('contacts')
    op.drop_index('ix_inboxes_password', 'inboxes')
    op.drop_table('inboxes')
    ### end Alembic commands ###
| grafgustav/accessmail | alembic/versions/4feb484b3fd4_init.py | Python | mit | 3,441 |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, msg_getdata, msg_verack, NODE_BLOOM, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import assert_equal, disconnect_nodes, connect_nodes_bi, sync_blocks, wait_until
class P2PIgnoreInv(P2PInterface):
    """P2P connection that ignores block invs and records the service bits
    advertised in the first addr message it receives."""
    firstAddrnServices = 0

    def on_inv(self, message):
        # The node will send us invs for other blocks. Ignore them.
        pass

    def on_addr(self, message):
        # Remember the services advertised by the first relayed address.
        self.firstAddrnServices = message.addrs[0].nServices

    def wait_for_addr(self, timeout=5):
        """Block until an addr message has been received (or timeout)."""
        def received_addr():
            return self.last_message.get("addr")
        wait_until(received_addr, timeout=timeout, lock=mininode_lock)

    def send_getdata_for_block(self, blockhash):
        """Request a single block (MSG_BLOCK inv type 2) by hex hash."""
        request = msg_getdata()
        request.inv.append(CInv(2, int(blockhash, 16)))
        self.send_message(request)
class NodeNetworkLimitedTest(DigiByteTestFramework):
    """Functional test for NODE_NETWORK_LIMITED service signalling.

    node0 is pruned (signals NODE_NETWORK_LIMITED), node1 and node2 are
    unpruned; node2 starts unsynced to exercise IBD behaviour against a
    limited peer.
    """

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        # -addrmantest enables local address relay checks on the pruned node
        self.extra_args = [['-prune=550', '-addrmantest'], [], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def disconnect_all(self):
        """Disconnect every pair of nodes, both directions."""
        disconnect_nodes(self.nodes[0], 1)
        disconnect_nodes(self.nodes[1], 0)
        disconnect_nodes(self.nodes[2], 1)
        disconnect_nodes(self.nodes[2], 0)
        disconnect_nodes(self.nodes[0], 2)
        disconnect_nodes(self.nodes[1], 2)

    def setup_network(self):
        # Start with the default topology, then isolate all nodes so each
        # sub-test controls connectivity explicitly.
        super(NodeNetworkLimitedTest, self).setup_network()
        self.disconnect_all()

    def run_test(self):
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())

        expected_services = NODE_BLOOM | NODE_WITNESS | NODE_NETWORK_LIMITED

        self.log.info("Check that node has signalled expected services.")
        assert_equal(node.nServices, expected_services)

        self.log.info("Check that the localservices is as expected.")
        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
        connect_nodes_bi(self.nodes, 0, 1)
        blocks = self.nodes[1].generate(292)
        sync_blocks([self.nodes[0], self.nodes[1]])

        self.log.info("Make sure we can max retrieve block at tip-288.")
        node.send_getdata_for_block(blocks[1])  # last block in valid range
        node.wait_for_block(int(blocks[1], 16), timeout=3)

        self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
        node.wait_for_disconnect(5)

        self.log.info("Check local address relay, do a fresh connection.")
        self.nodes[0].disconnect_p2ps()
        node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        node1.send_message(msg_verack())

        node1.wait_for_addr()
        # must relay address with NODE_NETWORK_LIMITED
        assert_equal(node1.firstAddrnServices, 1036)

        self.nodes[0].disconnect_p2ps()
        node1.wait_for_disconnect()

        # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
        # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
        connect_nodes_bi(self.nodes, 0, 2)
        try:
            sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
        except Exception:
            # FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; the sync timeout is the expected
            # outcome here, so catch Exception only.
            pass
        # node2 must remain at height 0
        assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

        # now connect also to node 1 (non pruned)
        connect_nodes_bi(self.nodes, 1, 2)

        # sync must be possible
        sync_blocks(self.nodes)

        # disconnect all peers
        self.disconnect_all()

        # mine 10 blocks on node 0 (pruned node)
        self.nodes[0].generate(10)

        # connect node1 (non pruned) with node0 (pruned) and check if the can sync
        connect_nodes_bi(self.nodes, 0, 1)

        # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
        sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
NodeNetworkLimitedTest().main()
| digibyte/digibyte | test/functional/p2p_node_network_limited.py | Python | mit | 4,934 |
from ._base import DanubeCloudCommand, CommandOption, CommandError
class Command(DanubeCloudCommand):
    """Management command verifying connectivity to internal/admin services:
    Redis, RabbitMQ and (unless --que is given) all configured databases."""
    help = 'Check connection to internal/admin services.'
    options = (
        CommandOption('-q', '--que', '--node', action='store_true', dest='que_only', default=False,
                      help='Check only services related to erigonesd on a compute node.'),
    )

    def _ok(self, ssl_on):
        # Green "OK (SSL)" for encrypted connections, yellow "OK (no SSL)" otherwise.
        if ssl_on:
            return self.colors.green('OK') + ' (SSL)'
        return self.colors.yellow('OK') + ' (no SSL)'

    def _failed(self):
        # Red "FAILED" marker used by all checks.
        return self.colors.red('FAILED')

    def check_rabbitmq(self):
        """Open a broker connection, send a heartbeat and report the outcome."""
        from que.erigonesd import cq

        connection = cq.broker_connection()
        try:
            connection.connect().send_heartbeat()
        except Exception as exc:
            self.display('RabbitMQ connection [{}]: {} ({})'.format(connection.as_uri(), self._failed(), exc))
            return False
        self.display('RabbitMQ connection [{}]: {}'.format(connection.as_uri(), self._ok(connection.ssl)))
        return True

    def check_redis(self):
        """Ping the Redis result backend and report the outcome."""
        from que.erigonesd import cq

        backend = cq.backend
        try:
            backend.client.ping()
        except Exception as exc:
            self.display('Redis connection [{}]: {} ({})'.format(backend.as_uri(), self._failed(), exc))
            return False
        # The client's repr exposes the connection class name, which tells us
        # whether the link is SSL-wrapped.
        uses_ssl = 'SSLConnection' in repr(backend.client)
        self.display('Redis connection [{}]: {}'.format(backend.as_uri(), self._ok(uses_ssl)))
        return True

    def check_db(self):
        """Try connecting to every configured database; return a list of bools."""
        from django.db import connections

        results = []
        for db_name in connections:
            con = connections[db_name]
            try:
                con.connect()
            except Exception as exc:
                self.display('Database "{}" connection: {} ({})'.format(db_name, self._failed(), exc))
                results.append(False)
            else:
                self.display('Database "{}" connection [{}]: {}'.format(db_name, con.connection.dsn, self._ok(False)))
                results.append(True)
        return results

    def handle(self, que_only=False, **options):
        """Run all checks; raise CommandError if any of them failed."""
        results = [self.check_redis(), self.check_rabbitmq()]
        if not que_only:
            results += self.check_db()
        if not all(results):
            raise CommandError('Test connection to some services has failed')
| erigones/esdc-ce | core/management/commands/service_check.py | Python | apache-2.0 | 2,406 |
from django.core.urlresolvers import reverse
from django.db.models import Value
from django.db.models.functions import Concat
from ..main.tables_ajax import SecuredTableJson
from ..main.mixins import capitalize
from .models import Contractor, Contract, SupportContract, ContractorContact
class ContractTableJson(SecuredTableJson):
    """Server-side (AJAX) datatables JSON source for Contract rows."""
    model = Contract
    columns = ['no', 'sicet_type', 'service_amount', 'material_amount', 'start_date', 'end_date', 'remarks']
    column_names = [x for x in capitalize(columns)]
    # column_names[-1] = 'Options'
    # define column names that will be used in sorting
    # order is important and should be same as order of columns
    # displayed by datatables. For non sortable columns use empty
    # value like ''
    order_columns = columns

    def render_column(self, row, column):
        """Render custom HTML for selected columns; defer to the base class
        for everything else."""
        # We want to render user as a custom column
        # Optional suffix parts for the contract number link text.
        change_type = ' {}'.format(row.change_type) if row.change_type else ''
        version = '-{}'.format(row.version) if row.version else ''
        if column == 'no':
            # Contract number rendered as a link to the update view.
            html = '<a href="{}" class="btn default btn-xs green-stripe">{}</a>'.\
                format(reverse("main:default-update",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'Contract',
                                       'pk': row.id
                                       }
                               ),
                       '{}{}{}'.format(row.no, change_type, version),
                       )
            return html
        # NOTE(review): the '_latest_status', '_latest_action' and
        # 'user.username' branches below reference columns that are not in
        # `columns` above — presumably copied from a duty/team table; confirm
        # whether they are reachable for this table.
        elif column == '_latest_status':
            status = row.dutyhistory_set.first()
            if not status:
                status = 'INITIAL'
            else:
                status = status.status
            # CSS label class per status value.
            labels = {
                'ON-HOLD': 'grey-mint',
                'OPEN': 'yellow-gold',
                'CLOSED': 'green-jungle',
                'INITIAL': 'yellow',
            }
            html = '''
            <span class="label bg-{0}">
            {1}
            </span>
            </a>
            '''.format(
                labels[status],
                status
            )
            return html
        elif column == '_latest_action':
            action = row.dutyhistory_set.first()
            if not action:
                action = 'No Action Taken'
            else:
                action = action.action_taken
            html = '<a href="{}?duty-pk={}">{}</a>'.format(
                reverse('main:default-table',
                        args=['team-mgt', 'DutyHistory']),
                row.id,
                action
            )
            return html
        elif column == 'user.username':
            # Semicolon-joined usernames of all assigned users.
            val = [user.username for user in row.user.all()]
            return '; '.join(val)
        else:
            return super(ContractTableJson, self).render_column(row, column)
class ContractorTableJson(SecuredTableJson):
    """Server-side (AJAX) datatables JSON source for Contractor rows."""
    model = Contractor
    # def get_initial_queryset(self):
    #     # return queryset used as base for futher sorting/filtering
    #     # these are simply objects displayed in datatable
    #     # You should not filter data returned here by any filter values entered by user. This is because
    #     # we need some base queryset to count total number of records.
    #
    #     return self.model.objects.all()
    columns = ['full_name', 'alias', 'remarks', 'id']
    column_names = [x for x in capitalize(columns)]
    column_names[-1] = 'Options'
    # define column names that will be used in sorting
    # order is important and should be same as order of columns
    # displayed by datatables. For non sortable columns use empty
    # value like ''
    order_columns = columns

    def render_column(self, row, column):
        """Render custom HTML for selected columns; defer to the base class
        for everything else."""
        # We want to render user as a custom column
        if column == 'id':
            # "Options" column: buttons linking to the contractor's related
            # contracts, support contracts and contacts tables.
            btn_contracts = '<a href="{}?contractor__pk={}" class="btn default btn-xs green-stripe">Contracts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'Contract'}),
                       row.id)
            btn_support_contracts = '<a href="{}?contractor__pk={}" class="btn default btn-xs yellow-stripe">Support Contracts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'SupportContract'}),
                       row.id)
            btn_contacts = '<a href="{}?contractor__pk={}" class="btn default btn-xs red-stripe">Contacts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'ContractorContact'}),
                       row.id)
            return btn_contracts + btn_support_contracts + btn_contacts
        # NOTE(review): the '_latest_status', '_latest_action' and
        # 'user.username' branches below reference columns that are not in
        # `columns` above — presumably copied from a duty/team table; confirm
        # whether they are reachable for this table.
        elif column == '_latest_status':
            status = row.dutyhistory_set.first()
            if not status:
                status = 'INITIAL'
            else:
                status = status.status
            # CSS label class per status value.
            labels = {
                'ON-HOLD': 'grey-mint',
                'OPEN': 'yellow-gold',
                'CLOSED': 'green-jungle',
                'INITIAL': 'yellow',
            }
            html = '''
            <span class="label bg-{0}">
            {1}
            </span>
            </a>
            '''.format(
                labels[status],
                status
            )
            return html
        elif column == '_latest_action':
            action = row.dutyhistory_set.first()
            if not action:
                action = 'No Action Taken'
            else:
                action = action.action_taken
            html = '<a href="{}?duty-pk={}">{}</a>'.format(
                reverse('main:default-table',
                        args=['team-mgt', 'DutyHistory']),
                row.id,
                action
            )
            return html
        elif column == 'user.username':
            # Semicolon-joined usernames of all assigned users.
            val = [user.username for user in row.user.all()]
            return '; '.join(val)
        else:
            return super(ContractorTableJson, self).render_column(row, column)
class SupportContractTableJson(SecuredTableJson):
    """Server-side DataTables JSON source for SupportContract records.

    NOTE(review): render_column() is a verbatim copy of the Contractor
    table's implementation -- the '_latest_status', '_latest_action' and
    'user.username' branches are never requested (they are not in
    ``columns``), and the 'id' buttons filter by contractor__pk using a
    SupportContract id.  Confirm whether this was intentional.
    """
    model = SupportContract
    # def get_initial_queryset(self):
    #     # return queryset used as base for futher sorting/filtering
    #     # these are simply objects displayed in datatable
    #     # You should not filter data returned here by any filter values entered by user. This is because
    #     # we need some base queryset to count total number of records.
    #
    #     return self.model.objects.all()
    # Columns sent to DataTables; the trailing 'id' column is repurposed as
    # an "Options" button group by render_column() below.
    columns = ['no', 'cost_center', 'amount', 'id']
    column_names = [x for x in capitalize(columns)]
    column_names[-1] = 'Options'
    # define column names that will be used in sorting
    # order is important and should be same as order of columns
    # displayed by datatables. For non sortable columns use empty
    # value like ''
    order_columns = columns
    def render_column(self, row, column):
        """Render one DataTables cell (see class-level NOTE)."""
        # We want to render user as a custom column
        if column == 'id':
            btn_contracts = '<a href="{}?contractor__pk={}" class="btn default btn-xs green-stripe">Contracts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'Contract'}),
                       row.id)
            btn_support_contracts = '<a href="{}?contractor__pk={}" class="btn default btn-xs yellow-stripe">Support Contracts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'SupportContract'}),
                       row.id)
            btn_contacts = '<a href="{}?contractor__pk={}" class="btn default btn-xs red-stripe">Contacts</a>'.\
                format(reverse("main:default-table",
                               kwargs={'mgt': 'contract-mgt',
                                       'model':'ContractorContact'}),
                       row.id)
            return btn_contracts + btn_support_contracts + btn_contacts
        elif column == '_latest_status':
            status = row.dutyhistory_set.first()
            if not status:
                status = 'INITIAL'
            else:
                status = status.status
            labels = {
                'ON-HOLD': 'grey-mint',
                'OPEN': 'yellow-gold',
                'CLOSED': 'green-jungle',
                'INITIAL': 'yellow',
            }
            html = '''
            <span class="label bg-{0}">
                {1}
            </span>
            </a>
            '''.format(
                labels[status],
                status
            )
            return html
        elif column == '_latest_action':
            action = row.dutyhistory_set.first()
            if not action:
                action = 'No Action Taken'
            else:
                action = action.action_taken
            html = '<a href="{}?duty-pk={}">{}</a>'.format(
                reverse('main:default-table',
                        args=['team-mgt', 'DutyHistory']),
                row.id,
                action
            )
            return html
        elif column == 'user.username':
            val = [user.username for user in row.user.all()]
            return '; '.join(val)
        else:
            return super(SupportContractTableJson, self).render_column(row, column)
class ContractorContactTableJson(SecuredTableJson):
    """Server-side DataTables JSON source for ContractorContact records."""
    model = ContractorContact

    def get_initial_queryset(self):
        """Return the base queryset, annotated with a computed full_name.

        DataTables requests always start their query string with
        ``&draw=1&columns[data]...``, so any other *leading* parameter is
        treated as a caller-supplied pre-filter (e.g. ``?contractor__pk=1``).
        """
        qs = self.model.objects.annotate(
            full_name=Concat('first_name', Value(' '), 'last_name')
        )
        # Robustness fix: default to '' (not None) so a request without a
        # query string cannot raise AttributeError on .split(), and skip the
        # filter branch entirely when the leading parameter is empty.
        query_string = self.request.META.get('QUERY_STRING', '').split('&')
        if query_string[0] and 'draw' not in query_string[0]:
            # Take the first parameter's key and use it as a filter name.
            # NOTE(review): the key comes straight from the client; an
            # unknown field name will raise FieldError -- confirm whether a
            # whitelist is needed.
            qf = query_string[0].split('=')[0]
            return qs.filter(**{qf: self.request.GET.get(qf)}).all()
        return qs.all()

    columns = ['full_name', 'position', 'email', 'mobile_no', 'office_no', 'fax_no', 'remarks', 'authority_level']
    column_names = [x for x in capitalize(columns)]
    # column_names[-1] = 'Options'
    # define column names that will be used in sorting
    # order is important and should be same as order of columns
    # displayed by datatables. For non sortable columns use empty
    # value like ''
    order_columns = columns

    def render_column(self, row, column):
        # No custom rendering needed; defer to the secured base class.
        return super(ContractorContactTableJson, self).render_column(row, column)
| mpdevilleres/tbpc_app | tbpc/contract_mgt/tables_ajax.py | Python | mit | 12,161 |
import pytest
from tests.algorithms_tests.factories import AlgorithmFactory
from tests.archives_tests.factories import ArchiveFactory
from tests.evaluation_tests.test_permissions import get_groups_with_set_perms
from tests.factories import ChallengeFactory, ExternalChallengeFactory
from tests.organizations_tests.factories import OrganizationFactory
from tests.reader_studies_tests.factories import ReaderStudyFactory
@pytest.mark.django_db
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize(
    "factory,related_name,perm",
    (
        (AlgorithmFactory, "algorithms", "view_algorithm"),
        (ArchiveFactory, "archives", "view_archive"),
        (ChallengeFactory, "challenges", "view_challenge"),
        (
            ExternalChallengeFactory,
            "externalchallenges",
            "view_externalchallenge",
        ),
        (ReaderStudyFactory, "readerstudies", "view_readerstudy"),
    ),
)
def test_related_permissions_assigned(
    client, reverse, factory, related_name, perm
):
    """Organization membership grants the view permission on related objects.

    Exercised from both sides of the m2m relation (``reverse`` flag):
    adding/removing/clearing relations must keep the editors and members
    groups' permissions in sync.
    """
    org1, org2 = OrganizationFactory(), OrganizationFactory()
    obj1, obj2, obj3, obj4 = (
        factory(),
        factory(),
        factory(),
        factory(),
    )
    if reverse:
        # Drive the m2m from the related object's side.
        for obj in [obj1, obj2, obj3, obj4]:
            obj.organizations.add(org1, org2)
        for obj in [obj3, obj4]:
            obj.organizations.remove(org1, org2)
        for obj in [obj1, obj2]:
            obj.organizations.remove(org2)
    else:
        # Drive the m2m from the organization's side.
        getattr(org1, related_name).add(obj1, obj2, obj3, obj4)
        getattr(org1, related_name).remove(obj3, obj4)
    # We end up with org1 only being related to obj1 and obj2
    expected_perms = {
        obj1: {org1.editors_group: {perm}, org1.members_group: {perm}},
        obj2: {org1.editors_group: {perm}, org1.members_group: {perm}},
    }
    for obj in [obj1, obj2, obj3, obj4]:
        for group in [
            org1.editors_group,
            org1.members_group,
            org2.editors_group,
            org2.members_group,
        ]:
            assert get_groups_with_set_perms(obj).get(
                group
            ) == expected_perms.get(obj, {}).get(group)
    # Test clearing
    if reverse:
        obj1.organizations.clear()
        obj2.organizations.clear()
    else:
        getattr(org1, related_name).clear()
    for obj in [obj1, obj2, obj3, obj4]:
        for group in [
            org1.editors_group,
            org1.members_group,
            org2.editors_group,
            org2.members_group,
        ]:
            assert get_groups_with_set_perms(obj).get(group) is None
| comic/comic-django | app/tests/organizations_tests/test_signals.py | Python | apache-2.0 | 2,610 |
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.py3compat import *
from qtpy import QtWidgets
from libqtopensesame.misc.base_subcomponent import base_subcomponent
from libqtopensesame.misc.translate import translation_context
_ = translation_context(u'item_combobox', category=u'core')
class item_combobox(QtWidgets.QComboBox, base_subcomponent):

	"""
	desc:
		A combobox to select existing items.
	"""

	def __init__(self, main_window, exclude=None):

		"""
		desc:
			Constructor.

		arguments:
			main_window:	The main-window object.

		keywords:
			exclude:		A list of item names to hide from the combobox,
							or None for no exclusions.
		"""

		QtWidgets.QComboBox.__init__(self, main_window)
		self.setup(main_window)
		# Bug fix: don't use a mutable default argument; a shared default
		# list would be reused across every instance of this widget.
		self.exclude = [] if exclude is None else exclude
		self.refresh()

	@property
	def selected_item(self):

		"""
		returns:
			The name of the selected item, or None when the placeholder
			entry (index 0) is active.
		"""

		if self.currentIndex() == 0:
			return None
		return self.currentText()

	@property
	def items(self):

		"""
		returns:
			A list of selectable item names (excluded names filtered out).
		"""

		return [item for item in self.item_store if item not in self.exclude]

	def select(self, item_name):

		"""
		desc:
			Selects the entry matching item_name, falling back to the
			placeholder entry when there is no match.

		returns:
			True if the item was found and selected, False otherwise.
		"""

		i = self.findText(item_name)
		if i < 0:
			self.setCurrentIndex(0)
			return False
		self.setCurrentIndex(i)
		return True

	def refresh(self):

		"""
		desc:
			Rebuilds the entries from the item store, preserving the current
			selection when possible.
		"""

		prev_selected = self.selected_item
		while self.count():
			self.removeItem(0)
		self.addItem(_(u'No item selected'))
		self.setItemIcon(0, self.theme.qicon(u'go-down'))
		for i, item_name in enumerate(self.items):
			self.addItem(item_name)
			self.setItemIcon(i+1,
				self.theme.qicon(self.item_store[item_name].item_icon()))
		self.select(prev_selected)
| eort/OpenSesame | libqtopensesame/_input/item_combobox.py | Python | gpl-3.0 | 1,970 |
#!/usr/bin/env python
from tables import *
import numarray as NA
import struct, sys
import shelve
import psyco
# This class is accessible only for the examples
class Small(IsDescription):
    """Small benchmark record: a 4-char string, a 32-bit int, a 64-bit float.

    Each class attribute names a column and its value declares the column
    type; IsDescription validates that no unexpected attributes are added.
    """
    var1 = StringCol(itemsize=4)   # 4-character string
    var2 = Int32Col()              # 32-bit signed integer
    var3 = Float64Col()            # double-precision float
# Define a user record to characterize some kind of particles
class Medium(IsDescription):
    """Medium-sized benchmark record describing a particle measurement."""
    name = StringCol(itemsize=16) # 16-character String
    float1 = Float64Col(shape=2, dflt=2.3)  # pair of doubles
    #float1 = Float64Col(dflt=1.3)
    #float2 = Float64Col(dflt=2.3)
    ADCcount = Int16Col() # signed short integer
    grid_i = Int32Col() # integer
    grid_j = Int32Col() # integer
    pressure = Float32Col() # float (single-precision)
    # Bug fix: "Flaot64Col" was a typo that raised NameError as soon as this
    # class body executed.
    energy = Float64Col() # double (double-precision)
# Define a user record to characterize some kind of particles
class Big(IsDescription):
    """Large benchmark record: same fields as Medium plus two 32-element
    float arrays."""
    name = StringCol(itemsize=16) # 16-character String
    #float1 = Float64Col(shape=32, dflt=NA.arange(32))
    #float2 = Float64Col(shape=32, dflt=NA.arange(32))
    float1 = Float64Col(shape=32, dflt=range(32))
    float2 = Float64Col(shape=32, dflt=[2.2]*32)
    ADCcount = Int16Col() # signed short integer
    grid_i = Int32Col() # integer
    grid_j = Int32Col() # integer
    pressure = Float32Col() # float (single-precision)
    energy = Float64Col() # double (double-precision)
def createFile(filename, totalrows, recsize):
    """Write three lists of record tuples ('tuple0'..'tuple2') to a new
    shelve file and return (rows written, per-record size in bytes).

    recsize selects which record class is benchmarked: "big", "medium"
    or "small".  Python 2 code (xrange).
    """
    # Open a 'n'ew file
    fileh = shelve.open(filename, flag = "n")
    rowswritten = 0
    # Get the record object associated with the new table
    if recsize == "big":
        d = Big()
        arr = NA.array(NA.arange(32), type=NA.Float64)
        arr2 = NA.array(NA.arange(32), type=NA.Float64)
    elif recsize == "medium":
        d = Medium()
    else:
        d = Small()
    #print d
    #sys.exit(0)
    for j in range(3):
        # Create a table
        #table = fileh.createTable(group, 'tuple'+str(j), Record(), title,
        #                          compress = 6, expectedrows = totalrows)
        # Create a Table instance
        tablename = 'tuple'+str(j)
        table = []
        # Fill the table
        if recsize == "big" or recsize == "medium":
            for i in xrange(totalrows):
                d.name = 'Particle: %6d' % (i)
                #d.TDCcount = i % 256
                d.ADCcount = (i * 256) % (1 << 16)
                if recsize == "big":
                    #d.float1 = NA.array([i]*32, NA.Float64)
                    #d.float2 = NA.array([i**2]*32, NA.Float64)
                    arr[0] = 1.1
                    d.float1 = arr
                    arr2[0] = 2.2
                    d.float2 = arr2
                    pass
                else:
                    d.float1 = NA.array([i**2]*2, NA.Float64)
                    #d.float1 = float(i)
                    #d.float2 = float(i)
                d.grid_i = i
                d.grid_j = 10 - i
                d.pressure = float(i*i)
                d.energy = float(d.pressure ** 4)
                table.append((d.ADCcount, d.energy, d.float1, d.float2,
                              d.grid_i, d.grid_j, d.name, d.pressure))
                # Only on float case
                #table.append((d.ADCcount, d.energy, d.float1,
                #              d.grid_i, d.grid_j, d.name, d.pressure))
        else:
            for i in xrange(totalrows):
                d.var1 = str(i)
                d.var2 = i
                d.var3 = 12.1e10
                table.append((d.var1, d.var2, d.var3))
        # Save this table on disk
        fileh[tablename] = table
        rowswritten += totalrows
    # Close the file
    fileh.close()
    # _v_fmt is the struct format string of the record description;
    # calcsize gives the packed record size in bytes.
    return (rowswritten, struct.calcsize(d._v_fmt))
def readFile(filename, recsize):
    """Read the three pickled tables back and print a small selection.

    For big/medium records select float1 where grid_i < 20; for small
    records select var2 where var2 < 20.  Python 2 code (print statement).
    """
    # Open the HDF5 file in read-only mode
    fileh = shelve.open(filename, "r")
    for table in ['tuple0', 'tuple1', 'tuple2']:
        if recsize == "big" or recsize == "medium":
            e = [ t[2] for t in fileh[table] if t[4] < 20 ]
            # if there is only one float (array)
            #e = [ t[1] for t in fileh[table] if t[3] < 20 ]
        else:
            e = [ t[1] for t in fileh[table] if t[1] < 20 ]
        print "resulting selection list ==>", e
        print "Total selected records ==> ", len(e)
    # Close the file (eventually destroy the extended type)
    fileh.close()
# Add code to test here
if __name__=="__main__":
    import getopt
    import time
    # Command-line driver: create the shelve file, read it back and report
    # write/read throughput.  Python 2 code (print statement, time.clock).
    usage = """usage: %s [-f] [-s recsize] [-i iterations] file
            -s use [big] record, [medium] or [small]
            -i sets the number of rows in each table\n""" % sys.argv[0]
    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 's:fi:')
    except:
        sys.stderr.write(usage)
        sys.exit(0)
    # if we pass too much parameters, abort
    if len(pargs) != 1:
        sys.stderr.write(usage)
        sys.exit(0)
    # default options
    recsize = "medium"
    iterations = 100
    # Get the options
    for option in opts:
        if option[0] == '-s':
            recsize = option[1]
            if recsize not in ["big", "medium", "small"]:
                sys.stderr.write(usage)
                sys.exit(0)
        elif option[0] == '-i':
            iterations = int(option[1])
    # Catch the hdf5 file passed as the last argument
    file = pargs[0]
    t1 = time.clock()
    # psyco.bind JIT-compiles the benchmarked functions before timing them.
    psyco.bind(createFile)
    (rowsw, rowsz) = createFile(file, iterations, recsize)
    t2 = time.clock()
    tapprows = round(t2-t1, 3)
    t1 = time.clock()
    psyco.bind(readFile)
    readFile(file, recsize)
    t2 = time.clock()
    treadrows = round(t2-t1, 3)
    print "Rows written:", rowsw, " Row size:", rowsz
    print "Time appending rows:", tapprows
    print "Write rows/sec: ", int(iterations * 3/ float(tapprows))
    print "Write KB/s :", int(rowsw * rowsz / (tapprows * 1024))
    print "Time reading rows:", treadrows
    print "Read rows/sec: ", int(iterations * 3/ float(treadrows))
    print "Read KB/s :", int(rowsw * rowsz / (treadrows * 1024))
| cpcloud/PyTables | bench/shelve-bench.py | Python | bsd-3-clause | 6,421 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
from datetime import datetime, timedelta
from superdesk.io.commands.update_ingest import LAST_ITEM_UPDATE
import superdesk
import superdesk.tests as tests
from behave import given, when, then # @UnresolvedImport
from flask import json
from eve.methods.common import parse
from superdesk import default_user_preferences, get_resource_service, utc
from superdesk.utc import utcnow, get_expiry_date
from eve.io.mongo import MongoJSONEncoder
from base64 import b64encode
from wooper.general import fail_and_print_body, apply_path, \
parse_json_response, WooperAssertionError
from wooper.expect import (
expect_status, expect_status_in,
expect_json, expect_json_length,
expect_json_contains, expect_json_not_contains,
expect_headers_contain,
)
from wooper.assertions import (
assert_in, assert_equal)
from urllib.parse import urlparse
from os.path import basename
from superdesk.tests import test_user, get_prefixed_url, set_placeholder
from re import findall
from eve.utils import ParsedRequest
import shutil
from apps.dictionaries.resource import DICTIONARY_FILE
from test_factory import setup_auth_user
# Remote image used by media-upload scenarios.
external_url = 'http://thumbs.dreamstime.com/z/digital-nature-10485007.jpg'
def test_json(context):
    # Assert that the JSON in context.text (placeholders applied) is a
    # subset of the last JSON response; return the parsed response body.
    try:
        response_data = json.loads(context.response.get_data())
    except Exception:
        fail_and_print_body(context.response, 'response is not valid json')
    context_data = json.loads(apply_placeholders(context, context.text))
    assert_equal(json_match(context_data, response_data), True,
                 msg=str(context_data) + '\n != \n' + str(response_data))
    return response_data
def test_key_is_present(key, context, response):
    """Test if given key is present in response.
    In case the context value is empty - "", {}, [] - it checks if it's non empty in response.
    If it's set in context to false, it will check that it's falsy/empty in response too.
    :param key
    :param context
    :param response
    """
    # NOTE(review): the assertion only fires when the context value is a
    # bool -- then the response value must be falsy/empty.  This does not
    # fully match the docstring above; confirm the intended contract.
    assert not isinstance(context[key], bool) or not response[key], \
        '"%s" should be empty or false, but it was "%s" in (%s)' % (key, response[key], response)
def json_match(context_data, response_data):
    """Recursively check that ``context_data`` is contained in
    ``response_data``.

    Dicts match when every expected key is present and matches (extra
    response keys are ignored); lists match when each expected element
    matches at least one response element; scalars must be equal.  A value
    of ``"__any_value__"`` only requires the key to be present (checked via
    :func:`test_key_is_present`).  Mismatches are printed for diagnostics.
    """
    if isinstance(context_data, dict):
        assert isinstance(response_data, dict), 'response data is not dict, but %s' % type(response_data)
        for key, expected in context_data.items():
            if key not in response_data:
                print(key, ' not in ', response_data)
                return False
            if expected == "__any_value__":
                test_key_is_present(key, context_data, response_data)
                continue
            if not json_match(expected, response_data[key]):
                return False
        return True
    if isinstance(context_data, list):
        for expected_item in context_data:
            if not any(json_match(expected_item, actual)
                       for actual in response_data):
                print(expected_item, ' not in ', response_data)
                return False
        return True
    # Scalar (or anything else): plain equality.
    if context_data != response_data:
        print(context_data, ' != ', response_data)
    return context_data == response_data
def get_fixture_path(fixture):
    """Return the absolute path of a fixture file shipped next to this
    module (in its ``fixtures`` directory)."""
    here = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(here, 'fixtures', fixture)
def get_macro_path(macro):
    """Return the absolute path of a macro file under the local ``macros``
    directory (relative to the current working directory)."""
    return os.path.join(os.path.abspath("macros"), macro)
def get_self_href(resource, context):
    """Return the resource's own URL from its HAL ``_links`` section.

    Raises AssertionError (showing the offending payload) when the document
    carries no ``_links``.  Fixed typo in the message ("expted").
    """
    assert '_links' in resource, 'expected "_links", but got only %s' % (resource)
    return resource['_links']['self']['href']
def get_res(url, context):
    # GET the url, require HTTP 200 and return the parsed JSON body.
    response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
    expect_status(response, 200)
    return json.loads(response.get_data())
def parse_date(datestr):
    """Parse an ISO-8601-style timestamp with a numeric UTC offset,
    e.g. ``2016-05-04T12:30:00+0000``, into an aware datetime."""
    fmt = "%Y-%m-%dT%H:%M:%S%z"
    return datetime.strptime(datestr, fmt)
def assert_200(response):
    """Assert we get status code 200."""
    # 201 (created) and 204 (no content) also count as success here.
    expect_status_in(response, (200, 201, 204))
def assert_404(response):
    """Assert we get status code 404."""
    assert response.status_code == 404, 'Expected 404, got %d' % (response.status_code)
def assert_ok(response):
    """Assert we get ok status within api response."""
    expect_status_in(response, (200, 201))
    expect_json_contains(response, {'_status': 'OK'})
def get_json_data(response):
    # Parse the response body as JSON and return it.
    return json.loads(response.get_data())
def get_it(context):
    # Return (self-href, etag) of the first fixture document stored by a
    # preceding "given" step (context.data / context.resource).
    it = context.data[0]
    res = get_res('/%s/%s' % (context.resource, it['_id']), context)
    return get_self_href(res, context), res.get('_etag')
def if_match(context, etag):
    # Build request headers, adding an If-Match header when an etag is given.
    headers = []
    if etag:
        headers = [('If-Match', etag)]
    headers = unique_headers(headers, context.headers)
    return headers
def unique_headers(headers_to_add, old_headers):
    """Merge two header lists into one without duplicate names.

    Entries from ``headers_to_add`` override same-named entries from
    ``old_headers``; the result is a list of (name, value) tuples.
    """
    merged = dict(old_headers)
    merged.update({name: value for name, value in headers_to_add})
    return list(merged.items())
def patch_current_user(context, data):
    # Fetch the current user (for a fresh etag), PATCH it with the given
    # payload and assert the update succeeded.
    response = context.client.get(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
                                  headers=context.headers)
    user = json.loads(response.get_data())
    headers = if_match(context, user.get('_etag'))
    response = context.client.patch(get_prefixed_url(context.app, '/users/%s' % context.user['_id']),
                                    data=data, headers=headers)
    assert_ok(response)
    return response
def apply_placeholders(context, text):
    """Substitute ``#PLACEHOLDER#`` tokens in *text*.

    Three kinds of tokens are resolved:
    - ``#DATE...#`` (optionally with ``+n``/``-n`` day offsets) becomes a
      formatted timestamp relative to now;
    - ``#resource.field#`` is looked up on a context attribute previously
      stored by another step;
    - anything else is looked up in ``context.placeholders``.
    Unresolvable tokens are left untouched.
    """
    placeholders = getattr(context, 'placeholders', {})
    for placeholder in findall('#([^#"]+)#', text):
        if placeholder.startswith('DATE'):
            value = utcnow()
            unit = placeholder.find('+')
            if unit != -1:
                value += timedelta(days=int(placeholder[unit + 1]))
            else:
                unit = placeholder.find('-')
                if unit != -1:
                    value -= timedelta(days=int(placeholder[unit + 1]))
            value = value.strftime("%Y-%m-%dT%H:%M:%S%z")
        elif placeholder not in placeholders:
            try:
                resource_name, field_name = placeholder.lower().split('.', maxsplit=1)
            except ValueError:
                # Bug fix: was a bare "except:"; only a token that is not of
                # the "resource.field" form should be skipped here.
                continue
            resource = getattr(context, resource_name, None)
            if resource and field_name in resource:
                value = str(resource[field_name])
            else:
                continue
        else:
            value = placeholders[placeholder]
        text = text.replace('#%s#' % placeholder, value)
    return text
def get_resource_name(url):
    """Return the trailing path component (the resource name) of a URL."""
    path = urlparse(url).path
    return basename(path)
@given('empty "{resource}"')
def step_impl_given_empty(context, resource):
    # Wipe the resource before the scenario; users are never wiped so the
    # authenticated test user survives.
    if not is_user_resource(resource):
        with context.app.test_request_context(context.app.config['URL_PREFIX']):
            get_resource_service(resource).delete_action()
@given('"{resource}"')
def step_impl_given_(context, resource):
    # Replace the resource's content with the JSON documents in the step
    # text (placeholders applied); remember them on the context.
    data = apply_placeholders(context, context.text)
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        if not is_user_resource(resource):
            get_resource_service(resource).delete_action()
        items = [parse(item, resource) for item in json.loads(data)]
        if is_user_resource(resource):
            for item in items:
                # Skip the e-mail activation workflow for fixture users.
                item.setdefault('needs_activation', False)
        get_resource_service(resource).post(items)
        context.data = items
        context.resource = resource
        # Expose the last created document as context.<resource>.
        setattr(context, resource, items[-1])
@given('the "{resource}"')
def step_impl_given_the(context, resource):
    # Like the plain given-step, but without placeholder substitution.
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        if not is_user_resource(resource):
            get_resource_service(resource).delete_action()
        orig_items = {}
        items = [parse(item, resource) for item in json.loads(context.text)]
        get_resource_service(resource).post(items)
        # NOTE(review): orig_items is always empty, so "orig_items or items"
        # always resolves to items -- looks like leftover dead code.
        context.data = orig_items or items
        context.resource = resource
@given('ingest from "{provider}"')
def step_impl_given_resource_with_provider(context, provider):
    # Seed the ingest collection, tagging every item with the provider id
    # registered in context.providers.
    resource = 'ingest'
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        get_resource_service(resource).delete_action()
        items = [parse(item, resource) for item in json.loads(context.text)]
        for item in items:
            item['ingest_provider'] = context.providers[provider]
        get_resource_service(resource).post(items)
        context.data = items
        context.resource = resource
@given('config')
def step_impl_given_config(context):
    # Re-initialise the test app with the JSON config from the step text
    # and log the test user back in.
    tests.setup(context, json.loads(context.text))
    setup_auth_user(context)
@given('we have "{role_name}" role')
def step_impl_given_role(context, role_name):
    # Assign an existing role (by name) to the current test user.
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        role = get_resource_service('roles').find_one(name=role_name, req=None)
        data = MongoJSONEncoder().encode({'role': role.get('_id')})
    response = patch_current_user(context, data)
    assert_ok(response)
@given('we have "{user_type}" as type of user')
def step_impl_given_user_type(context, user_type):
    # Switch the current test user's type (e.g. administrator/user).
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        data = json.dumps({'user_type': user_type})
    response = patch_current_user(context, data)
    assert_ok(response)
@when('we post to auth')
def step_impl_when_auth(context):
    # Authenticate with the credentials in the step text and store the
    # session token in the default headers for all subsequent requests.
    data = context.text
    context.response = context.client.post(get_prefixed_url(context.app, '/auth'), data=data, headers=context.headers)
    if context.response.status_code == 200 or context.response.status_code == 201:
        item = json.loads(context.response.get_data())
        if item.get('_id'):
            set_placeholder(context, 'AUTH_ID', item['_id'])
        context.headers.append(('Authorization', b'basic ' + b64encode(item['token'].encode('ascii') + b':')))
        context.user = item['user']
@given('we create a new macro "{macro_name}"')
def step_create_new_macro(context, macro_name):
    # Copy a fixture macro into the app's macros folder.
    src = get_fixture_path(macro_name)
    dst = get_macro_path(macro_name)
    shutil.copyfile(src, dst)
@when('we fetch from "{provider_name}" ingest "{guid}"')
def step_impl_fetch_from_provider_ingest(context, provider_name, guid):
    # Ingest one item from the named provider within a request context.
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        fetch_from_provider(context, provider_name, guid)
def embed_routing_scheme_rules(scheme):
    """Fetch all content filters referenced by the given routing scheme and
    embed them into the latter (replacing the plain references to filters).
    :param dict scheme: routing scheme configuration
    """
    filters_service = superdesk.get_resource_service('content_filters')
    rules_filters = (
        (rule, str(rule['filter']))
        for rule in scheme['rules'] if rule.get('filter'))
    for rule, filter_id in rules_filters:
        content_filter = filters_service.find_one(_id=filter_id, req=None)
        rule['filter'] = content_filter
@when('we fetch from "{provider_name}" ingest "{guid}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing(context, provider_name, guid):
    # The step text holds the routing scheme id (placeholders allowed).
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        _id = apply_placeholders(context, context.text)
        routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
        embed_routing_scheme_rules(routing_scheme)
        fetch_from_provider(context, provider_name, guid, routing_scheme)
@when('we ingest and fetch "{provider_name}" "{guid}" to desk "{desk}" stage "{stage}" using routing_scheme')
def step_impl_fetch_from_provider_ingest_using_routing_with_desk(context, provider_name, guid, desk, stage):
    # Same as above, but the ingested items are also routed to the given
    # desk and stage.
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        _id = apply_placeholders(context, context.text)
        desk_id = apply_placeholders(context, desk)
        stage_id = apply_placeholders(context, stage)
        routing_scheme = get_resource_service('routing_schemes').find_one(_id=_id, req=None)
        embed_routing_scheme_rules(routing_scheme)
        fetch_from_provider(context, provider_name, guid, routing_scheme, desk_id, stage_id)
def fetch_from_provider(context, provider_name, guid, routing_scheme=None, desk_id=None, stage_id=None):
    """Ingest item *guid* from the named provider, optionally routing it to
    a desk/stage, and record a "<provider>.<guid>" placeholder with the new
    item id for later steps.
    """
    ingest_provider_service = get_resource_service('ingest_providers')
    provider = ingest_provider_service.find_one(name=provider_name, req=None)
    provider['routing_scheme'] = routing_scheme
    provider_service = context.provider_services[provider.get('type')]
    provider_service.provider = provider
    # File-based provider types parse a local file; the rest fetch by guid.
    if provider.get('type') == 'aap' or provider.get('type') == 'teletype':
        items = provider_service.parse_file(guid, provider)
    else:
        items = provider_service.fetch_ingest(guid)
    for item in items:
        item['versioncreated'] = utcnow()
        item['expiry'] = utcnow() + timedelta(minutes=20)
        if desk_id:
            from bson.objectid import ObjectId
            item['task'] = {'desk': ObjectId(desk_id), 'stage': ObjectId(stage_id)}
    failed = context.ingest_items(items, provider, rule_set=provider.get('rule_set'),
                                  routing_scheme=provider.get('routing_scheme'))
    assert len(failed) == 0, failed
    provider = ingest_provider_service.find_one(name=provider_name, req=None)
    ingest_provider_service.system_update(provider['_id'], {LAST_ITEM_UPDATE: utcnow()}, provider)
    for item in items:
        set_placeholder(context, '{}.{}'.format(provider_name, item['guid']), item['_id'])
@when('we post to "{url}"')
def step_impl_when_post_url(context, url):
    # Thin wrapper: POST the step text without asserting success.
    post_data(context, url)
def set_user_default(url, data):
    """Return *data*, normalized for user resources.

    For a users URL the JSON payload gets ``needs_activation`` defaulted to
    False (so fixture users skip the activation workflow).

    Bug fix: the original rebound the local ``data`` and returned nothing,
    so the normalization was silently lost for every caller.  The payload
    is now returned; callers that ignored the old ``None`` return keep
    working but should switch to ``data = set_user_default(url, data)``.
    """
    if is_user_resource(url):
        user = json.loads(data)
        user.setdefault('needs_activation', False)
        data = json.dumps(user)
    return data
def get_response_etag(response):
    # Extract the _etag field from a JSON response body.
    return json.loads(response.get_data())['_etag']
@when('we save etag')
def step_when_we_save_etag(context):
    # Remember the etag of the last response for a later comparison step.
    context.etag = get_response_etag(context.response)
@then('we get same etag')
def step_then_we_get_same_etag(context):
    # Compare the saved etag with the one from the last response.
    assert context.etag == get_response_etag(context.response), 'etags not matching'
def store_placeholder(context, url):
    # On success, expose the created document as context.<resource-name>.
    if context.response.status_code in (200, 201):
        item = json.loads(context.response.get_data())
        if item['_status'] == 'OK' and item.get('_id'):
            setattr(context, get_resource_name(url), item)
def post_data(context, url, success=False):
    """POST context.text to *url* (placeholders applied in both), record the
    response and outbox on the context and return the parsed JSON body.

    When *success* is True the response must be OK, otherwise failures are
    left for the scenario to assert on.
    """
    with context.app.mail.record_messages() as outbox:
        data = apply_placeholders(context, context.text)
        url = apply_placeholders(context, url)
        # Bug fix: set_user_default() was called for its side effect, but it
        # only rebinds a local, so the user defaults were never applied;
        # apply them to the payload explicitly here.
        if is_user_resource(url):
            user = json.loads(data)
            user.setdefault('needs_activation', False)
            data = json.dumps(user)
        context.response = context.client.post(get_prefixed_url(context.app, url),
                                               data=data, headers=context.headers)
        if success:
            assert_ok(context.response)
        item = json.loads(context.response.get_data())
        context.outbox = outbox
        store_placeholder(context, url)
        return item
@when('we post to "{url}" with "{tag}" and success')
def step_impl_when_post_url_with_tag(context, url, tag):
    # POST, assert success and expose the new id via the given placeholder.
    item = post_data(context, url, True)
    if item.get('_id'):
        set_placeholder(context, tag, item.get('_id'))
@given('we have "{url}" with "{tag}" and success')
def step_impl_given_post_url_with_tag(context, url, tag):
    # Same behaviour as the when-step above, usable in a Given clause.
    item = post_data(context, url, True)
    if item.get('_id'):
        set_placeholder(context, tag, item.get('_id'))
@when('we post to "{url}" with success')
def step_impl_when_post_url_with_success(context, url):
    # POST and assert success without storing any placeholder.
    post_data(context, url, True)
@when('we put to "{url}"')
def step_impl_when_put_url(context, url):
    """PUT the step text to the given url.

    Bug fix: get_self_href() used to be called with the raw url string
    instead of a fetched resource document (and with a missing argument),
    which failed at runtime.  Mirror the patch/delete steps: fetch the
    resource first, then PUT to its self link.
    """
    with context.app.mail.record_messages() as outbox:
        data = apply_placeholders(context, context.text)
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        context.response = context.client.put(get_prefixed_url(context.app, href), data=data, headers=context.headers)
        assert_ok(context.response)
        context.outbox = outbox
@when('we get "{url}"')
def when_we_get_url(context, url):
    # Optional step text supplies extra "Key: value" request headers.
    # NOTE(review): placeholders are applied twice (before and after header
    # parsing); the second pass looks redundant -- confirm before removing.
    url = apply_placeholders(context, url).encode('ascii').decode('unicode-escape')
    headers = []
    if context.text:
        for line in context.text.split('\n'):
            key, val = line.split(': ')
            headers.append((key, val))
    headers = unique_headers(headers, context.headers)
    url = apply_placeholders(context, url)
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
@when('we get dictionary "{dictionary_id}"')
def when_we_get_dictionary(context, dictionary_id):
    # Fetch one dictionary, projecting only its content field.
    dictionary_id = apply_placeholders(context, dictionary_id)
    url = '/dictionaries/' + dictionary_id + '?projection={"content": 1}'
    return when_we_get_url(context, url)
@then('we get latest')
def step_impl_we_get_latest(context):
    # Follow the self link of the last response and expect a success status.
    data = get_json_data(context.response)
    href = get_self_href(data, context)
    headers = if_match(context, data.get('_etag'))
    href = get_prefixed_url(context.app, href)
    context.response = context.client.get(href, headers=headers)
    assert_200(context.response)
@when('we find for "{resource}" the id as "{name}" by "{search_criteria}"')
def when_we_find_for_resource_the_id_as_name_by_search_criteria(context, resource, name, search_criteria):
    # Expect exactly one match and store its id under the given placeholder.
    url = '/' + resource + '?where=' + search_criteria
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
    if context.response.status_code == 200:
        expect_json_length(context.response, 1, path='_items')
        item = json.loads(context.response.get_data())
        item = item['_items'][0]
        if item.get('_id'):
            set_placeholder(context, name, item['_id'])
@when('we delete "{url}"')
def step_impl_when_delete_url(context, url):
    # DELETE the resource using its current etag as the If-Match header.
    with context.app.mail.record_messages() as outbox:
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        href = get_prefixed_url(context.app, href)
        context.response = context.client.delete(href, headers=headers)
        context.outbox = outbox
@when('we delete latest')
def when_we_delete_it(context):
    # DELETE the document described by the last JSON response.
    with context.app.mail.record_messages() as outbox:
        res = get_json_data(context.response)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        href = get_prefixed_url(context.app, href)
        context.response = context.client.delete(href, headers=headers)
        # NOTE(review): stored as context.email here, while every other step
        # uses context.outbox -- confirm which attribute later steps read.
        context.email = outbox
@when('we patch "{url}"')
def step_impl_when_patch_url(context, url):
    # PATCH the step text (placeholders applied) using If-Match.
    with context.app.mail.record_messages() as outbox:
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        data = apply_placeholders(context, context.text)
        href = get_prefixed_url(context.app, href)
        context.response = context.client.patch(href, data=data, headers=headers)
        context.outbox = outbox
@when('we patch latest')
def step_impl_when_patch_again(context):
    # PATCH the document from the last response and assert success.
    with context.app.mail.record_messages() as outbox:
        data = get_json_data(context.response)
        href = get_prefixed_url(context.app, get_self_href(data, context))
        headers = if_match(context, data.get('_etag'))
        data2 = apply_placeholders(context, context.text)
        context.response = context.client.patch(href, data=data2, headers=headers)
        if context.response.status_code in (200, 201):
            item = json.loads(context.response.get_data())
            if item['_status'] == 'OK' and item.get('_id'):
                setattr(context, get_resource_name(href), item)
        assert_ok(context.response)
        context.outbox = outbox
@when('we patch latest without assert')
def step_impl_when_patch_without_assert(context):
    # Variant of "we patch latest" for scenarios that expect failure.
    data = get_json_data(context.response)
    href = get_prefixed_url(context.app, get_self_href(data, context))
    headers = if_match(context, data.get('_etag'))
    data2 = apply_placeholders(context, context.text)
    context.response = context.client.patch(href, data=data2, headers=headers)
@when('we patch routing scheme "{url}"')
def step_impl_when_patch_routing_scheme(context, url):
    # Append the rule from the step text to the scheme's existing rules and
    # PATCH the whole rules list back.
    with context.app.mail.record_messages() as outbox:
        url = apply_placeholders(context, url)
        res = get_res(url, context)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        data = json.loads(apply_placeholders(context, context.text))
        res.get('rules', []).append(data)
        context.response = context.client.patch(get_prefixed_url(context.app, href),
                                                data=json.dumps({'rules': res.get('rules', [])}),
                                                headers=headers)
        context.outbox = outbox
@when('we patch given')
def step_impl_when_patch(context):
    # PATCH the fixture document created by the preceding "given" step.
    with context.app.mail.record_messages() as outbox:
        href, etag = get_it(context)
        headers = if_match(context, etag)
        context.response = context.client.patch(get_prefixed_url(context.app, href), data=context.text, headers=headers)
        assert_ok(context.response)
        context.outbox = outbox
@when('we get given')
def step_impl_when_get(context):
    # GET the fixture document created by the preceding "given" step.
    href, _etag = get_it(context)
    context.response = context.client.get(get_prefixed_url(context.app, href), headers=context.headers)
@when('we restore version {version}')
def step_impl_when_restore_version(context, version):
    # PUT a restore request built from the old and current version numbers.
    data = get_json_data(context.response)
    href = get_self_href(data, context)
    headers = if_match(context, data.get('_etag'))
    text = '{"type": "text", "old_version": %s, "last_version": %s}' % (version, data.get('_current_version'))
    context.response = context.client.put(get_prefixed_url(context.app, href), data=text, headers=headers)
    assert_ok(context.response)
@when('we upload a file "{filename}" to "{dest}"')
def step_impl_when_upload_image(context, filename, dest):
    # Upload the named fixture file to *dest* as the 'media' form field.
    # Fix: the step pattern must use the "{filename}" capture group; a literal
    # placeholder here would leave the function's ``filename`` argument
    # unbound when behave matches and calls the step.
    upload_file(context, dest, filename, 'media')
@when('we upload a binary file with cropping')
def step_impl_when_upload_with_crop(context):
    # Upload the bike.jpg fixture with crop coordinates as extra form data.
    data = {'CropTop': '0', 'CropLeft': '0', 'CropBottom': '333', 'CropRight': '333'}
    upload_file(context, '/upload', 'bike.jpg', 'media', data)


@when('upload a file "{file_name}" to "{destination}" with "{guid}"')
def step_impl_when_upload_image_with_guid(context, file_name, destination, guid):
    # Upload a fixture file, forcing a specific guid on the created item.
    upload_file(context, destination, file_name, 'media', {'guid': guid})


@when('we upload a new dictionary with success')
def when_upload_dictionary(context):
    # POST a new dictionary fixture with the scenario text as metadata.
    data = json.loads(apply_placeholders(context, context.text))
    upload_file(context, '/dictionaries', 'test_dict.txt', DICTIONARY_FILE, data)
    assert_ok(context.response)


@when('we upload to an existing dictionary with success')
def when_upload_patch_dictionary(context):
    # PATCH an existing dictionary (resolved via placeholders) with a new file.
    data = json.loads(apply_placeholders(context, context.text))
    url = apply_placeholders(context, '/dictionaries/#dictionaries._id#')
    etag = apply_placeholders(context, '#dictionaries._etag#')
    upload_file(context, url, 'test_dict2.txt', DICTIONARY_FILE, data, 'patch', [('If-Match', etag)])
    assert_ok(context.response)
def upload_file(context, dest, filename, file_field, extra_data=None, method='post', user_headers=None):
    """Upload a fixture file via a multipart form request and store the response.

    :param dest: resource URL to upload to
    :param filename: fixture file name, resolved via ``get_fixture_path``
    :param file_field: form field name that carries the file
    :param extra_data: optional dict of extra form fields
    :param method: HTTP method name on the test client ('post' or 'patch')
    :param user_headers: optional extra headers, e.g. ('If-Match', etag)

    Fix: ``user_headers`` previously defaulted to a mutable ``[]`` shared
    across calls; use ``None`` and create a fresh list per call instead.
    """
    if user_headers is None:
        user_headers = []
    with open(get_fixture_path(filename), 'rb') as f:
        data = {file_field: f}
        if extra_data:
            data.update(extra_data)
        headers = [('Content-Type', 'multipart/form-data')]
        headers.extend(user_headers)
        headers = unique_headers(headers, context.headers)
        url = get_prefixed_url(context.app, dest)
        context.response = getattr(context.client, method)(url, data=data, headers=headers)
        assert_ok(context.response)
        store_placeholder(context, url)
@when('we upload a file from URL')
def step_impl_when_upload_from_url(context):
    # POST an external URL to /upload so the server fetches the file itself.
    data = {'URL': external_url}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)


@when('we upload a file from URL with cropping')
def step_impl_when_upload_from_url_with_crop(context):
    # Same as above, but with crop coordinates in the form data.
    data = {'URL': external_url,
            'CropTop': '0',
            'CropLeft': '0',
            'CropBottom': '333',
            'CropRight': '333'}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/upload'), data=data, headers=headers)


@when('we get user profile')
def step_impl_when_get_user(context):
    # GET the profile of the currently authenticated test user.
    profile_url = '/%s/%s' % ('users', context.user['_id'])
    context.response = context.client.get(get_prefixed_url(context.app, profile_url), headers=context.headers)


@then('we get new resource')
def step_impl_then_get_new(context):
    # Creation succeeded and the response links to itself; optionally match
    # the scenario text against the response JSON.
    assert_ok(context.response)
    expect_json_contains(context.response, 'self', path='_links')
    if context.text is not None:
        return test_json(context)


@then('we get next take as "{new_take}"')
def step_impl_then_get_next_take(context, new_take):
    # Fetch the latest item and remember its id and takes-package id as placeholders.
    step_impl_we_get_latest(context)
    data = get_json_data(context.response)
    set_placeholder(context, new_take, data['_id'])
    set_placeholder(context, 'TAKE_PACKAGE', data['takes']['_id'])
    test_json(context)


@then('we get error {code}')
def step_impl_then_get_error(context, code):
    # Expect the given HTTP error status; optionally match the body JSON.
    expect_status(context.response, int(code))
    if context.text:
        test_json(context)


@then('we get list with {total_count} items')
def step_impl_then_get_list(context, total_count):
    # "{n}" asserts an exact total; "{n}+" asserts at-least-n.
    assert_200(context.response)
    data = get_json_data(context.response)
    int_count = int(total_count.replace('+', ''))
    if '+' in total_count:
        assert int_count <= data['_meta']['total'], '%d items is not enough' % data['_meta']['total']
    else:
        assert int_count == data['_meta']['total'], 'got %d' % (data['_meta']['total'])
    if context.text:
        test_json(context)


@then('we get "{value}" in formatted output')
def step_impl_then_get_formatted_output(context, value):
    # Pass if *value* occurs in any item's formatted_item string.
    assert_200(context.response)
    value = apply_placeholders(context, value)
    data = get_json_data(context.response)
    for item in data['_items']:
        if value in item['formatted_item']:
            return
    assert False
@then('we get "{value}" in formatted output as "{group}" story for subscriber "{sub}"')
def step_impl_then_get_formatted_output(context, value, group, sub):  # noqa: F811
    # Assert *value* appears as an association id in *group* of the formatted
    # item published to subscriber *sub*.
    # Fix: replaced bare ``except:`` (which also swallows SystemExit /
    # KeyboardInterrupt) with the exceptions json.loads can actually raise.
    assert_200(context.response)
    value = apply_placeholders(context, value)
    data = get_json_data(context.response)
    for item in data['_items']:
        if item['subscriber_id'] != sub:
            continue
        try:
            formatted_data = json.loads(item['formatted_item'])
        except (ValueError, TypeError):
            # formatted_item is not JSON (e.g. XML formatter output) - skip it
            continue
        for group_item in formatted_data.get('associations', {}).get(group, []):
            if group_item.get('_id', '') == value:
                return
    assert False
@then('we get "{value}" as "{group}" story for subscriber "{sub}" in package "{pck}"')
def step_impl_then_get_formatted_output_pck(context, value, group, sub, pck):
    # Assert *value* appears as an association id in *group* of the formatted
    # package *pck* published to subscriber *sub*.
    # Fix: replaced bare ``except:`` with the exceptions json.loads raises.
    assert_200(context.response)
    value = apply_placeholders(context, value)
    data = get_json_data(context.response)
    for item in data['_items']:
        if item['item_id'] != pck:
            continue
        if item['subscriber_id'] != sub:
            continue
        try:
            formatted_data = json.loads(item['formatted_item'])
        except (ValueError, TypeError):
            # formatted_item is not JSON - skip it
            continue
        for group_item in formatted_data.get('associations', {}).get(group, []):
            if group_item.get('_id', '') == value:
                return
    assert False
@then('we get "{value}" as "{group}" story for subscriber "{sub}" not in package "{pck}" version "{v}"')
def step_impl_then_get_formatted_output_pck(context, value, group, sub, pck, v):  # noqa: F811
    # Assert that version *v* of package *pck* published to subscriber *sub*
    # exists but does NOT contain *value* among its *group* associations.
    # Fixes: bare ``except:`` narrowed to json.loads exceptions; removed a
    # no-op ``assert True`` statement.
    assert_200(context.response)
    value = apply_placeholders(context, value)
    data = get_json_data(context.response)
    for item in data['_items']:
        if item['item_id'] == pck:
            if item['subscriber_id'] == sub and str(item['item_version']) == v:
                try:
                    formatted_data = json.loads(item['formatted_item'])
                except (ValueError, TypeError):
                    continue
                for group_item in formatted_data.get('associations', {}).get(group, []):
                    if group_item.get('_id', '') == value:
                        # the value must not be present in this package version
                        assert False
                return
    assert False
@then('we get "{value}" in formatted output as "{group}" newsml12 story')
def step_impl_then_get_formatted_output_newsml(context, value, group):
    # Pass if the NewsML element <group>value</group> occurs in any formatted item.
    assert_200(context.response)
    value = apply_placeholders(context, value)
    data = get_json_data(context.response)
    for item in data['_items']:
        if '<' + group + '>' + value + '</' + group + '>' in item['formatted_item']:
            return
    assert False


@then('we get no "{field}"')
def step_impl_then_get_nofield(context, field):
    # Assert the response JSON does not contain *field*.
    assert_200(context.response)
    expect_json_not_contains(context.response, field)


@then('expect json in "{path}"')
def step_impl_then_get_nofield_in_path(context, path):
    # Match scenario-text JSON against the response at the given path.
    assert_200(context.response)
    expect_json(context.response, context.text, path)


@then('we get existing resource')
def step_impl_then_get_existing(context):
    # 200 response whose body matches the scenario text.
    assert_200(context.response)
    test_json(context)


@then('we get OK response')
def step_impl_then_get_ok(context):
    assert_200(context.response)


@then('we get response code {code}')
def step_impl_then_get_code(context, code):
    expect_status(context.response, int(code))


@then('we get updated response')
def step_impl_then_get_updated(context):
    # Update succeeded; optionally match the body against the scenario text.
    assert_ok(context.response)
    if context.text:
        test_json(context)


@then('we get "{key}" in "{url}"')
def step_impl_then_get_key_in_url(context, key, url):
    # GET *url* and assert its JSON contains *key*.
    url = apply_placeholders(context, url)
    res = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
    assert_200(res)
    expect_json_contains(res, key)
@then('we get file metadata')
def step_impl_then_get_file_meta(context):
    # Assert the response carries a non-empty 'filemeta' dictionary.
    # Fix: the message string was previously a bare expression statement on
    # its own line after the assert, so failures showed no explanation;
    # attach it as the assert message.
    assert len(
        apply_path(
            parse_json_response(context.response),
            'filemeta'
        ).items()
    ) > 0, 'expected non empty metadata dictionary'
@then('we get "{filename}" metadata')
def step_impl_then_get_given_file_meta(context, filename):
    """Assert the response 'filemeta' matches the known fixture metadata.

    Fix: the step pattern must use the "{filename}" capture group; a literal
    placeholder here would leave the function's ``filename`` argument unbound
    when behave matches and calls the step.
    """
    if filename == 'bike.jpg':
        # EXIF metadata of the bike.jpg image fixture.
        metadata = {
            'ycbcrpositioning': 1,
            'imagelength': 2448,
            'exifimagewidth': 2448,
            'meteringmode': 2,
            'datetimedigitized': '2013:08:01 16:19:28',
            'exposuremode': 0,
            'flashpixversion': '0100',
            'isospeedratings': 80,
            'length': 469900,
            'imageuniqueid': 'f3533c05daef2debe6257fd99e058eec',
            'datetimeoriginal': '2013:08:01 16:19:28',
            'whitebalance': 0,
            'exposureprogram': 3,
            'colorspace': 1,
            'exifimageheight': 3264,
            'software': 'Google',
            'resolutionunit': 2,
            'make': 'SAMSUNG',
            'maxaperturevalue': [276, 100],
            'aperturevalue': [276, 100],
            'scenecapturetype': 0,
            'exposuretime': [1, 2004],
            'datetime': '2013:08:01 16:19:28',
            'exifoffset': 216,
            'yresolution': [72, 1],
            'orientation': 1,
            'componentsconfiguration': '0000',
            'exifversion': '0220',
            'focallength': [37, 10],
            'flash': 0,
            'model': 'GT-I9300',
            'xresolution': [72, 1],
            'fnumber': [26, 10],
            'imagewidth': 3264
        }
    elif filename == 'green.ogg':
        # Audio metadata of the green.ogg fixture.
        metadata = {
            'producer': 'Lavf54.59.103',
            'music_genre': 'New Age',
            'sample_rate': '44100',
            'artist': 'Maxime Abbey',
            'length': 368058,
            'bit_rate': '160000',
            'title': 'Green Hills',
            'mime_type': 'audio/vorbis',
            'format_version': 'Vorbis version 0',
            'compression': 'Vorbis',
            'duration': '0:00:20.088163',
            'endian': 'Little endian',
            'nb_channel': '2'
        }
    elif filename == 'this_week_nasa.mp4':
        # Video metadata of the this_week_nasa.mp4 fixture.
        metadata = {
            'mime_type': 'video/mp4',
            'creation_date': '1904-01-01T00:00:00+00:00',
            'duration': '0:00:10.224000',
            'width': '480',
            'length': 877869,
            'comment': 'User volume: 100.0%',
            'height': '270',
            'endian': 'Big endian',
            'last_modification': '1904-01-01T00:00:00+00:00'
        }
    else:
        raise NotImplementedError("No metadata for file '{}'.".format(filename))
    expect_json(
        context.response,
        metadata,
        path='filemeta'
    )
@then('we get "{type}" renditions')
def step_impl_then_get_renditions(context, type):
    # Every configured rendition for *type* must be present, well-formed,
    # and fetchable as a JPEG.
    expect_json_contains(context.response, 'renditions')
    renditions = apply_path(parse_json_response(context.response), 'renditions')
    assert isinstance(renditions, dict), 'expected dict for image renditions'
    for rend_name in context.app.config['RENDITIONS'][type]:
        desc = renditions[rend_name]
        assert isinstance(desc, dict), 'expected dict for rendition description'
        assert 'href' in desc, 'expected href in rendition description'
        assert 'media' in desc, 'expected media identifier in rendition description'
        we_can_fetch_a_file(context, desc['href'], 'image/jpeg')


@then('item "{item_id}" is unlocked')
def then_item_is_unlocked(context, item_id):
    assert_200(context.response)
    data = json.loads(context.response.get_data())
    assert data.get('lock_user', None) is None, 'item is locked by user #{0}'.format(data.get('lock_user'))


@then('item "{item_id}" is locked')
def then_item_is_locked(context, item_id):
    assert_200(context.response)
    resp = parse_json_response(context.response)
    assert resp['lock_user'] is not None


@then('item "{item_id}" is assigned')
def then_item_is_assigned(context, item_id):
    resp = parse_json_response(context.response)
    assert resp['task'].get('user', None) is not None, 'item is not assigned'


@then('we get rendition "{name}" with mimetype "{mimetype}"')
def step_impl_then_get_rendition_with_mimetype(context, name, mimetype):
    # Check a single named rendition, fetch it, and remember its href.
    expect_json_contains(context.response, 'renditions')
    renditions = apply_path(parse_json_response(context.response), 'renditions')
    assert isinstance(renditions, dict), 'expected dict for image renditions'
    desc = renditions[name]
    assert isinstance(desc, dict), 'expected dict for rendition description'
    assert 'href' in desc, 'expected href in rendition description'
    we_can_fetch_a_file(context, desc['href'], mimetype)
    set_placeholder(context, "rendition.{}.href".format(name), desc['href'])


@when('we get updated media from archive')
def get_updated_media_from_archive(context):
    # Re-fetch the archive item saved earlier on context._id.
    url = 'archive/%s' % context._id
    when_we_get_url(context, url)
    assert_200(context.response)


@then('baseImage rendition is updated')
def check_base_image_rendition(context):
    check_rendition(context, 'baseImage')


@then('original rendition is updated with link to file having mimetype "{mimetype}"')
def check_original_rendition(context, mimetype):
    rv = parse_json_response(context.response)
    link_to_file = rv['renditions']['original']['href']
    assert link_to_file
    we_can_fetch_a_file(context, link_to_file, mimetype)


@then('thumbnail rendition is updated')
def check_thumbnail_rendition(context):
    check_rendition(context, 'thumbnail')


def check_rendition(context, rendition_name):
    # Helper: the named rendition must differ from the one saved on the context.
    rv = parse_json_response(context.response)
    assert rv['renditions'][rendition_name] != context.renditions[rendition_name], rv['renditions']


@then('we get "{key}"')
def step_impl_then_get_key(context, key):
    # Assert *key* is in the response and remember its value as a placeholder.
    assert_200(context.response)
    expect_json_contains(context.response, key)
    item = json.loads(context.response.get_data())
    set_placeholder(context, '%s' % key, item[key])


@then('we get action in user activity')
def step_impl_then_get_action(context):
    response = context.client.get(get_prefixed_url(context.app, '/activity'), headers=context.headers)
    expect_json_contains(response, '_items')
@then('we get a file reference')
def step_impl_then_get_file(context):
    # The upload response must reference a fetchable JSON upload resource;
    # the original response data is kept on context.fetched_data for later steps.
    assert_200(context.response)
    expect_json_contains(context.response, 'renditions')
    data = get_json_data(context.response)
    url = '/upload/%s' % data['_id']
    headers = [('Accept', 'application/json')]
    headers = unique_headers(headers, context.headers)
    response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
    assert_200(response)
    assert len(response.get_data()), response
    assert response.mimetype == 'application/json', response.mimetype
    expect_json_contains(response, 'renditions')
    expect_json_contains(response, {'mimetype': 'image/jpeg'})
    fetched_data = get_json_data(context.response)
    context.fetched_data = fetched_data


@then('we get cropped data smaller than "{max_size}"')
def step_impl_then_get_cropped_file(context, max_size):
    assert int(context.fetched_data['filemeta']['length']) < int(max_size), 'was expecting smaller image'


@then('we can fetch a data_uri')
def step_impl_we_fetch_data_uri(context):
    we_can_fetch_a_file(context, context.fetched_data['renditions']['original']['href'], 'image/jpeg')


@then('we fetch a file "{url}"')
def step_impl_we_cannot_fetch_file(context, url):
    # Fetch *url* without asserting success; following steps check the status.
    url = apply_placeholders(context, url)
    headers = [('Accept', 'application/json')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=headers)


def we_can_fetch_a_file(context, url, mimetype):
    # Helper: GET *url* and assert a non-empty body of the given mimetype.
    headers = [('Accept', 'application/json')]
    headers = unique_headers(headers, context.headers)
    response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
    assert_200(response)
    assert len(response.get_data()), response
    assert response.mimetype == mimetype, response.mimetype


@then('we can delete that file')
def step_impl_we_delete_file(context):
    # Delete the previously fetched upload, then verify it is gone.
    url = '/upload/%s' % context.fetched_data['_id']
    context.headers.append(('Accept', 'application/json'))
    headers = if_match(context, context.fetched_data.get('_etag'))
    response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
    assert_200(response)
    response = context.client.get(get_prefixed_url(context.app, url), headers=headers)
    assert_404(response)


@then('we get a picture url')
def step_impl_then_get_picture(context):
    assert_ok(context.response)
    expect_json_contains(context.response, 'picture_url')


@then('we get aggregations "{keys}"')
def step_impl_then_get_aggs(context, keys):
    # Assert every comma-separated key is present in _aggregations.
    assert_200(context.response)
    expect_json_contains(context.response, '_aggregations')
    data = get_json_data(context.response)
    aggs = data['_aggregations']
    for key in keys.split(','):
        assert_in(key, aggs)


@then('the file is stored localy')
def step_impl_then_file(context):
    # Note: 'localy' typo is part of the step text used by feature files.
    assert_200(context.response)
    folder = context.app.config['UPLOAD_FOLDER']
    assert os.path.exists(os.path.join(folder, context.filename))


@then('we get version {version}')
def step_impl_then_get_version(context, version):
    assert_200(context.response)
    expect_json_contains(context.response, {'_current_version': int(version)})


@then('the field "{field}" value is "{value}"')
def step_impl_then_get_field_value(context, field, value):
    assert_200(context.response)
    expect_json_contains(context.response, {field: value})


@then('we get etag matching "{url}"')
def step_impl_then_get_etag(context, url):
    # Only meaningful when If-Match/etag support is enabled in config.
    if context.app.config['IF_MATCH']:
        assert_200(context.response)
        expect_json_contains(context.response, '_etag')
        etag = get_json_data(context.response).get('_etag')
        response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
        expect_json_contains(response, {'_etag': etag})
@then('we get not modified response')
def step_impl_then_not_modified(context):
    expect_status(context.response, 304)


@then('we get "{header}" header')
def step_impl_then_get_header(context, header):
    expect_headers_contain(context.response, header)


@then('we get link to "{resource}"')
def then_we_get_link_to_resource(context, resource):
    doc = get_json_data(context.response)
    self_link = doc.get('_links').get('self')
    assert resource in self_link['href'], 'expect link to "%s", got %s' % (resource, self_link)


@then('we get deleted response')
def then_we_get_deleted_response(context):
    assert_200(context.response)


@when('we post to reset_password we get email with token')
def we_post_to_reset_password(context):
    # Request a password reset and extract the token from the sent email.
    data = {'email': 'foo@bar.org'}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    with context.app.mail.record_messages() as outbox:
        context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
                                               data=data, headers=headers)
        expect_status_in(context.response, (200, 201))
        assert len(outbox) == 1
        assert outbox[0].subject == "Reset password"
        email_text = outbox[0].body
        # the mail mentions the token validity period ("24" hours)
        assert "24" in email_text
        words = email_text.split()
        # the reset URL is the word following "link"; token is in the fragment
        url = urlparse(words[words.index("link") + 1])
        token = url.fragment.split('token=')[-1]
        assert token
        context.token = token


@then('we can check if token is valid')
def we_can_check_token_is_valid(context):
    # POSTing only the token validates it without changing the password.
    data = {'token': context.token}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
                                           data=data, headers=headers)
    expect_status_in(context.response, (200, 201))


@then('we update token to be expired')
def we_update_token_to_expired(context):
    # Back-date the reset request in the DB so the token counts as expired.
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        expiry = utc.utcnow() - timedelta(days=2)
        reset_request = get_resource_service('reset_user_password').find_one(req=None, token=context.token)
        reset_request['expire_time'] = expiry
        id = reset_request.pop('_id')  # NOTE(review): shadows builtin `id`
        get_resource_service('reset_user_password').patch(id, reset_request)


@then('token is invalid')
def check_token_invalid(context):
    data = {'token': context.token}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
                                           data=data, headers=headers)
    expect_status_in(context.response, (403, 401))


@when('we post to reset_password we do not get email with token')
def we_post_to_reset_password_it_fails(context):
    # The request succeeds but no email is sent (e.g. inactive user).
    data = {'email': 'foo@bar.org'}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    with context.app.mail.record_messages() as outbox:
        context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
                                               data=data, headers=headers)
        expect_status_in(context.response, (200, 201))
        assert len(outbox) == 0


def start_reset_password_for_user(context):
    # Helper: submit token + new password; callers assert the outcome.
    data = {'token': context.token, 'password': 'test_pass'}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/reset_user_password'),
                                           data=data, headers=headers)
@then('we fail to reset password for user')
def we_fail_to_reset_password_for_user(context):
    start_reset_password_for_user(context)
    step_impl_then_get_error(context, 403)


@then('we reset password for user')
def we_reset_password_for_user(context):
    # Reset the password, then prove it works by authenticating with it.
    start_reset_password_for_user(context)
    expect_status_in(context.response, (200, 201))
    auth_data = {'username': 'foo', 'password': 'test_pass'}
    headers = [('Content-Type', 'multipart/form-data')]
    headers = unique_headers(headers, context.headers)
    context.response = context.client.post(get_prefixed_url(context.app, '/auth'), data=auth_data, headers=headers)
    expect_status_in(context.response, (200, 201))


@when('we switch user')
def when_we_switch_user(context):
    # Authenticate as a second test user and remember their id.
    user = {'username': 'test-user-2', 'password': 'pwd', 'is_active': True,
            'needs_activation': False, 'sign_off': 'foo'}
    setup_auth_user(context, user)
    set_placeholder(context, 'USERS_ID', str(context.user['_id']))


@when('we setup test user')
def when_we_setup_test_user(context):
    setup_auth_user(context, test_user)


@when('we get my "{url}"')
def when_we_get_my_url(context, url):
    # GET *url* filtered to documents owned by the current user.
    user_id = str(context.user.get('_id'))
    my_url = '{0}?where={1}'.format(url, json.dumps({'user': user_id}))
    return when_we_get_url(context, my_url)


@when('we get user "{resource}"')
def when_we_get_user_resource(context, resource):
    url = '/users/{0}/{1}'.format(str(context.user.get('_id')), resource)
    return when_we_get_url(context, url)


@then('we get embedded items')
def we_get_embedded_items(context):
    # Re-fetch the resource with items embedded and check the expected count.
    response_data = json.loads(context.response.get_data())
    href = get_self_href(response_data, context)
    url = href + '/?embedded={"items": 1}'
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
    assert_200(context.response)
    context.response_data = json.loads(context.response.get_data())
    assert len(context.response_data['items']['view_items']) == 2


@when('we reset notifications')
def step_when_we_reset_notifications(context):
    context.app.notification_client.reset()


@then('we get notifications')
def then_we_get_notifications(context):
    # Match scenario-text JSON against the notifications recorded by the
    # test notification client.
    notifications = context.app.notification_client.messages
    notifications_data = [json.loads(notification) for notification in notifications]
    context_data = json.loads(apply_placeholders(context, context.text))
    assert_equal(json_match(context_data, notifications_data), True,
                 msg=str(context_data) + '\n != \n' + str(notifications_data))


@then('we get default preferences')
def get_default_prefs(context):
    response_data = json.loads(context.response.get_data())
    assert_equal(response_data['user_preferences'], default_user_preferences)


@when('we spike "{item_id}"')
def step_impl_when_spike_url(context, item_id):
    # Spike an archive item via the spike endpoint.
    item_id = apply_placeholders(context, item_id)
    res = get_res('/archive/' + item_id, context)
    headers = if_match(context, res.get('_etag'))
    context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
                                            data='{"state": "spiked"}', headers=headers)
@when('we spike fetched item')
def step_impl_when_spike_fetched_item(context):
    # Spike the item whose _id is supplied in the scenario text.
    data = json.loads(apply_placeholders(context, context.text))
    item_id = data["_id"]
    res = get_res('/archive/' + item_id, context)
    headers = if_match(context, res.get('_etag'))
    context.response = context.client.patch(get_prefixed_url(context.app, '/archive/spike/' + item_id),
                                            data='{"state": "spiked"}', headers=headers)


@when('we unspike "{item_id}"')
def step_impl_when_unspike_url(context, item_id):
    item_id = apply_placeholders(context, item_id)
    res = get_res('/archive/' + item_id, context)
    headers = if_match(context, res.get('_etag'))
    context.response = context.client.patch(get_prefixed_url(context.app, '/archive/unspike/' + item_id),
                                            data='{}', headers=headers)


@then('we get spiked content "{item_id}"')
def get_spiked_content(context, item_id):
    # The item must be in 'spiked' state with a 'spike' operation recorded.
    item_id = apply_placeholders(context, item_id)
    url = 'archive/{0}'.format(item_id)
    when_we_get_url(context, url)
    assert_200(context.response)
    response_data = json.loads(context.response.get_data())
    assert_equal(response_data['state'], 'spiked')
    assert_equal(response_data['operation'], 'spike')


@then('we get unspiked content "{id}"')
def get_unspiked_content(context, id):
    # The item must be back in 'draft' state after the unspike.
    url = 'archive/{0}'.format(id)
    when_we_get_url(context, url)
    assert_200(context.response)
    response_data = json.loads(context.response.get_data())
    assert_equal(response_data['state'], 'draft')
    assert_equal(response_data['operation'], 'unspike')
    # Tolga Akin (05/11/14)
    # Expiry value doesn't get set to None properly in Elastic.
    # Discussed with Petr so we'll look into this later
    # assert_equal(response_data['expiry'], None)


@then('we get global spike expiry')
def get_global_spike_expiry(context):
    get_desk_spike_expiry(context, context.app.config['SPIKE_EXPIRY_MINUTES'])


@then('we get global content expiry')
def get_global_content_expiry(context):
    get_desk_spike_expiry(context, context.app.config['CONTENT_EXPIRY_MINUTES'])


@then('we get content expiry {minutes}')
def get_content_expiry(context, minutes):
    get_desk_spike_expiry(context, int(minutes))


@then('we get desk spike expiry after "{test_minutes}"')
def get_desk_spike_expiry(context, test_minutes):
    # The item's expiry must be within *test_minutes* from now.
    response_data = json.loads(context.response.get_data())
    assert response_data['expiry']
    response_expiry = parse_date(response_data['expiry'])
    expiry = utc.utcnow() + timedelta(minutes=int(test_minutes))
    assert response_expiry <= expiry


@when('we mention user in comment for "{url}"')
def we_mention_user_in_comment(context, url):
    # Posting a comment with a mention must trigger exactly one email.
    with context.app.mail.record_messages() as outbox:
        step_impl_when_post_url(context, url)
        assert len(outbox) == 1
        assert_equal(outbox[0].subject, "You were mentioned in a comment by test_user")
        email_text = outbox[0].body
        assert email_text


@when('we change user status to "{status}" using "{url}"')
def we_change_user_status(context, status, url):
    # A status change must notify the user by email.
    with context.app.mail.record_messages() as outbox:
        step_impl_when_patch_url(context, url)
        assert len(outbox) == 1
        assert_equal(outbox[0].subject, "Your Superdesk account is " + status)
        assert outbox[0].body


@when('we get the default incoming stage')
def we_get_default_incoming_stage(context):
    # Resolve the desk's incoming stage from the previous response and
    # verify it is the default 'New' stage.
    data = json.loads(context.response.get_data())
    incoming_stage = data['_items'][0]['incoming_stage']
    assert incoming_stage
    url = 'stages/{0}'.format(incoming_stage)
    when_we_get_url(context, url)
    assert_200(context.response)
    data = json.loads(context.response.get_data())
    assert data['default_incoming'] is True
    assert data['name'] == 'New'
@then('we get stage filled in to default_incoming')
def we_get_stage_filled_in(context):
    data = json.loads(context.response.get_data())
    assert data['task']['stage']


@given('we have sessions "{url}"')
def we_have_sessions_get_id(context, url):
    # Fetch sessions and remember the first session's id and user on the context.
    when_we_get_url(context, url)
    item = json.loads(context.response.get_data())
    context.session_id = item['_items'][0]['_id']
    context.data = item
    set_placeholder(context, 'SESSION_ID', item['_items'][0]['_id'])
    setattr(context, 'users', item['_items'][0]['user'])


@then('we get session by id')
def we_get_session_by_id(context):
    url = 'sessions/' + context.session_id
    when_we_get_url(context, url)
    item = json.loads(context.response.get_data())
    returned_id = item["_id"]
    assert context.session_id == returned_id


@then('we delete session by id')
def we_delete_session_by_id(context):
    url = 'sessions/' + context.session_id
    step_impl_when_delete_url(context, url)
    assert_200(context.response)


@when('we create a new user')
def step_create_a_user(context):
    # Creating a user must send exactly one (activation) email.
    data = apply_placeholders(context, context.text)
    with context.app.mail.record_messages() as outbox:
        context.response = context.client.post(get_prefixed_url(context.app, '/users'),
                                               data=data, headers=context.headers)
        expect_status_in(context.response, (200, 201))
        assert len(outbox) == 1
        context.email = outbox[0]


@then('we get activation email')
def step_get_activation_email(context):
    # The activation URL is the word following "to"; token is in the fragment.
    assert context.email.subject == 'Superdesk account created'
    email_text = context.email.body
    words = email_text.split()
    url = urlparse(words[words.index("to") + 1])
    token = url.fragment.split('token=')[-1]
    assert token


@then('we set elastic limit')
def step_set_limit(context):
    context.app.settings['MAX_SEARCH_DEPTH'] = 1


@then('we get emails')
def step_we_get_email(context):
    # Every email body listed in the scenario text must have been sent.
    data = json.loads(context.text)
    for email in data:
        assert check_if_email_sent(context, email['body'])


@then('we get no email')
def step_we_get_no_email(context):
    assert len(context.outbox) == 0


def check_if_email_sent(context, body):
    # Helper: True when *body* is a substring of any recorded email body.
    if context.outbox:
        for email in context.outbox:
            if body in email.body:
                return True
        return False


@then('we get activity')
def then_we_get_activity(context):
    # Fetch 'notify' activity records; remember the first one and its user.
    url = apply_placeholders(context, '/activity?where={"name": "notify"}')
    context.response = context.client.get(get_prefixed_url(context.app, url), headers=context.headers)
    if context.response.status_code == 200:
        expect_json_length(context.response, 1, path='_items')
        item = json.loads(context.response.get_data())
        item = item['_items'][0]
        if item.get('_id'):
            setattr(context, 'activity', item)
            set_placeholder(context, 'USERS_ID', item['user'])
def login_as(context, username, password, user_type):
    # Helper: build a user dict (scenario text may add/override fields) and
    # authenticate as that user.
    # NOTE(review): the key ``user_type: user_type`` sets e.g.
    # user['administrator'] = 'administrator' — presumably intentional for
    # role flags; confirm.
    user = {'username': username, 'password': password, 'is_active': True,
            'is_enabled': True, 'needs_activation': False, user_type: user_type}
    if context.text:
        user.update(json.loads(context.text))
    setup_auth_user(context, user)


@given('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def given_we_login_as_user(context, username, password, user_type):
    login_as(context, username, password, user_type)


@when('we login as user "{username}" with password "{password}" and user type "{user_type}"')
def when_we_login_as_user(context, username, password, user_type):
    login_as(context, username, password, user_type)


def is_user_resource(resource):
    # Helper: True for the users resource in either bare or prefixed form.
    return resource in ('users', '/users')


@then('we get {no_of_stages} invisible stages')
def when_we_get_invisible_stages(context, no_of_stages):
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        stages = get_resource_service('stages').get_stages_by_visibility(is_visible=False)
        assert len(stages) == int(no_of_stages)


@then('we get {no_of_stages} visible stages')
def when_we_get_visible_stages(context, no_of_stages):
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        stages = get_resource_service('stages').get_stages_by_visibility(is_visible=True)
        assert len(stages) == int(no_of_stages)


@then('we get {no_of_stages} invisible stages for user')
def when_we_get_invisible_stages_for_user(context, no_of_stages):
    data = json.loads(apply_placeholders(context, context.text))
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        stages = get_resource_service('users').get_invisible_stages(data['user'])
        assert len(stages) == int(no_of_stages)


@then('we get "{field_name}" populated')
def then_field_is_populated(context, field_name):
    resp = parse_json_response(context.response)
    assert resp[field_name].get('user', None) is not None, 'item is not populated'
@then('we get "{field_name}" not populated')
def then_field_is_not_populated(context, field_name):
    """Assert that *field_name* in the (single item) response is None."""
    resp = parse_json_response(context.response)
    assert resp[field_name] is None, 'item is not populated'
@then('we get "{field_name}" not populated in results')
def then_field_is_not_populated_in_results(context, field_name):
    """Assert *field_name* is None on every item of a list response.

    Renamed from ``then_field_is_not_populated``: the original redefined the
    single-item step function of the same name (flake8 F811 shadowing).
    behave binds steps by the decorator string, not the function name, so
    the rename has no effect on feature files.
    """
    resps = parse_json_response(context.response)
    for resp in resps['_items']:
        assert resp[field_name] is None, 'item is not populated'
@when('we delete content filter "{name}"')
def step_delete_content_filter(context, name):
    """Delete the content filter called *name* through the API.

    The filter is looked up directly in the service so its id and etag can
    be used for the DELETE request.  The local was renamed from ``filter``
    to avoid shadowing the builtin of that name.
    """
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        content_filter = get_resource_service('content_filters').find_one(req=None, name=name)
        url = '/content_filters/{}'.format(content_filter['_id'])
        headers = if_match(context, content_filter.get('_etag'))
        context.response = context.client.delete(get_prefixed_url(context.app, url), headers=headers)
@when('we rewrite "{item_id}"')
def step_impl_when_rewrite(context, item_id):
    """POST a rewrite for an archive item and record both ids as placeholders."""
    context_data = {}
    _id = apply_placeholders(context, item_id)
    if context.text:
        context_data.update(json.loads(apply_placeholders(context, context.text)))
    data = json.dumps(context_data)
    context.response = context.client.post(
        get_prefixed_url(context.app, '/archive/{}/rewrite'.format(_id)),
        data=data, headers=context.headers)
    if context.response.status_code == 400:
        # bad request: record nothing, let the scenario assert on the response
        return
    resp = parse_json_response(context.response)
    set_placeholder(context, 'REWRITE_OF', _id)
    set_placeholder(context, 'REWRITE_ID', resp['_id']['_id'])
@when('we publish "{item_id}" with "{pub_type}" type and "{state}" state')
def step_impl_when_publish_url(context, item_id, pub_type, state):
    """PATCH an archive item through a publish-type endpoint, setting its state.

    Extra body fields may be supplied as JSON in the step text.  If the
    response lists linked packages, the first one is remembered as the
    item's take package placeholder.
    """
    item_id = apply_placeholders(context, item_id)
    res = get_res('/archive/' + item_id, context)
    headers = if_match(context, res.get('_etag'))
    context_data = {"state": state}
    if context.text:
        data = apply_placeholders(context, context.text)
        context_data.update(json.loads(data))
    data = json.dumps(context_data)
    context.response = context.client.patch(get_prefixed_url(context.app, '/archive/{}/{}'.format(pub_type, item_id)),
                                            data=data, headers=headers)
    resp = parse_json_response(context.response)
    linked_packages = resp.get('linked_in_packages', [])
    if linked_packages:
        take_package = linked_packages[0].get('package', '')
        set_placeholder(context, 'archive.{}.take_package'.format(item_id), take_package)
@when('we get digital item of "{item_id}"')
def step_impl_when_we_get_digital(context, item_id):
    """GET an archive item and record its 'takes' package id as a placeholder."""
    item_id = apply_placeholders(context, item_id)
    context.response = context.client.get(get_prefixed_url(context.app, '/archive/{}'.format(item_id)),
                                          headers=context.headers)
    resp = parse_json_response(context.response)
    linked_packages = resp.get('linked_in_packages', [])
    for lp in linked_packages:
        if lp.get('package_type', '') == 'takes':
            take_package = lp.get('package', '')
            set_placeholder(context, 'archive.{}.take_package'.format(item_id), take_package)
@then('the ingest item is routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_based_on_routing_scheme(context, rule_name):
    """Assert the ingested item was routed by the named rule."""
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        validate_routed_item(context, rule_name, True)
@then('the ingest item is routed and transformed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_routed_transformed_based_on_routing_scheme(context, rule_name):
    """Assert the ingested item was routed by the named rule and transformed."""
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        validate_routed_item(context, rule_name, True, True)
@then('the ingest item is not routed based on routing scheme and rule "{rule_name}"')
def then_ingest_item_is_not_routed_based_on_routing_scheme(context, rule_name):
    """Assert the ingested item was NOT routed by the named rule."""
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        validate_routed_item(context, rule_name, False)
def validate_routed_item(context, rule_name, is_routed, is_transformed=False):
    """Verify the effect of a routing scheme rule on an ingested item.

    The step body must be JSON carrying ``ingest`` (ingest id) and
    ``routing_scheme`` (scheme id).  For every fetch/publish destination of
    the named rule, archive and published collections are searched; with
    *is_routed* the item must be found with matching desk/stage/state (and
    with *is_transformed* also an updated abstract and package contents),
    otherwise no item may match.
    """
    data = json.loads(apply_placeholders(context, context.text))
    def validate_rule(action, state):
        # ``rule`` is resolved below, before this closure is invoked
        for destination in rule.get('actions', {}).get(action, []):
            query = {
                'and': [
                    {'term': {'ingest_id': str(data['ingest'])}},
                    {'term': {'task.desk': str(destination['desk'])}},
                    {'term': {'task.stage': str(destination['stage'])}},
                    {'term': {'state': state}}
                ]
            }
            item = get_archive_items(query) + get_published_items(query)
            if is_routed:
                assert len(item) > 0, 'No routed items found for criteria: ' + str(query)
                assert item[0]['ingest_id'] == data['ingest']
                assert item[0]['task']['desk'] == str(destination['desk'])
                assert item[0]['task']['stage'] == str(destination['stage'])
                assert item[0]['state'] == state
                if is_transformed:
                    assert item[0]['abstract'] == 'Abstract has been updated'
                assert_items_in_package(item[0], state, str(destination['desk']), str(destination['stage']))
            else:
                assert len(item) == 0
    scheme = get_resource_service('routing_schemes').find_one(_id=data['routing_scheme'], req=None)
    # falls back to {} when no rule of that name exists (case-insensitive match)
    rule = next((rule for rule in scheme['rules'] if rule['name'].lower() == rule_name.lower()), {})
    validate_rule('fetch', 'routed')
    validate_rule('publish', 'published')
@when('we schedule the routing scheme "{scheme_id}"')
def when_we_schedule_the_routing_scheme(context, scheme_id):
    """PATCH schedules onto the scheme's rules.

    Rule 1 is restricted to tomorrow/day-after between 16:00 and 20:00;
    when a second rule exists it is scheduled for today only.
    """
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        scheme_id = apply_placeholders(context, scheme_id)
        url = apply_placeholders(context, 'routing_schemes/%s' % scheme_id)
        res = get_res(url, context)
        href = get_self_href(res, context)
        headers = if_match(context, res.get('_etag'))
        rule = res.get('rules')[0]
        from apps.rules.routing_rules import Weekdays
        rule['schedule'] = {
            'day_of_week': [
                Weekdays.dayname(datetime.now() + timedelta(days=1)),
                Weekdays.dayname(datetime.now() + timedelta(days=2))
            ],
            'hour_of_day_from': '1600',
            'hour_of_day_to': '2000'
        }
        if len(res.get('rules')) > 1:
            rule = res.get('rules')[1]
            rule['schedule'] = {
                'day_of_week': [Weekdays.dayname(datetime.now())]
            }
        context.response = context.client.patch(get_prefixed_url(context.app, href),
                                                data=json.dumps({'rules': res.get('rules', [])}),
                                                headers=headers)
        assert_200(context.response)
def get_archive_items(query):
    """Return up to 100 archive items matching the elastic *query* filter."""
    request = ParsedRequest()
    request.max_results = 100
    request.args = {'filter': json.dumps(query)}
    return list(get_resource_service('archive').get(lookup=None, req=request))
def get_published_items(query):
    """Return up to 100 published items matching the elastic *query* filter."""
    request = ParsedRequest()
    request.max_results = 100
    request.args = {'filter': json.dumps(query)}
    return list(get_resource_service('published').get(lookup=None, req=request))
def assert_items_in_package(item, state, desk, stage):
    """Assert every item referenced by *item*'s package groups has the given
    state, desk and stage.

    Fixes two smells in the original: the loop variable re-bound the
    ``item`` parameter, and the reference collection used a hard-to-read
    nested comprehension with a redundant ``.get`` after the key check.
    """
    if not item.get('groups'):
        return
    resid_refs = [ref['residRef']
                  for group in item.get('groups', [])
                  for ref in group.get('refs', [])
                  if 'residRef' in ref]
    query = {'or': [{'term': {'_id': resid}} for resid in resid_refs]}
    items = get_archive_items(query)
    # every referenced item must be found exactly once
    assert len(items) == len(resid_refs)
    for packaged in items:
        assert packaged.get('state') == state
        assert packaged.get('task', {}).get('desk') == desk
        assert packaged.get('task', {}).get('stage') == stage
@given('I logout')
def logout(context):
    """End the current session: resolve the session id, then DELETE /auth/<id>."""
    we_have_sessions_get_id(context, '/sessions')
    step_impl_when_delete_url(context, '/auth/{}'.format(context.session_id))
    assert_200(context.response)
@then('we get "{url}" and match')
def we_get_and_match(context, url):
    """GET *url* and assert the response matches the JSON given in the step body."""
    url = apply_placeholders(context, url)
    response_data = get_res(url, context)
    context_data = json.loads(apply_placeholders(context, context.text))
    assert_equal(json_match(context_data, response_data), True,
                 msg=str(context_data) + '\n != \n' + str(response_data))
@then('there is no "{key}" in response')
def there_is_no_key_in_response(context, key):
    """Assert *key* is absent from the JSON response body."""
    data = get_json_data(context.response)
    assert key not in data, 'key "%s" is in %s' % (key, data)
@then('there is no "{key}" in "{namespace}" preferences')
def there_is_no_key_in_preferences(context, key, namespace):
    """Assert *key* is absent from the given user_preferences namespace."""
    data = get_json_data(context.response)['user_preferences']
    assert key not in data[namespace], 'key "%s" is in %s' % (key, data[namespace])
@then('we check if article has Embargo and Ed. Note of the article has embargo indication')
def step_impl_then_check_embargo(context):
    """Assert every returned article carries an embargo and an ed-note flagging it."""
    assert_200(context.response)
    try:
        response_data = json.loads(context.response.get_data())
    except Exception:
        # NOTE(review): assert_embargo below calls fail_and_print_body with
        # (context, response, msg) -- confirm this two-argument call is the
        # intended signature.
        fail_and_print_body(context.response, 'response is not valid json')
    if response_data.get('_meta') and response_data.get('_items'):
        for item in response_data.get('_items'):
            assert_embargo(context, item)
    else:
        assert_embargo(context, response_data)
def assert_embargo(context, item):
    """Fail the scenario unless *item* has an embargo and an ed-note mentioning it."""
    if not item.get('embargo'):
        fail_and_print_body(context, context.response, 'Embargo not found')
    if not item.get('ednote'):
        fail_and_print_body(context, context.response, 'Embargo indication in "Ed. Note" not found')
    # membership test replaces the unidiomatic find() > -1
    assert_equal('Embargoed' in item['ednote'], True)
@when('embargo lapses for "{item_id}"')
def embargo_lapses(context, item_id):
    """Backdate the item's embargo one minute into the past via a system update."""
    item_id = apply_placeholders(context, item_id)
    item = get_res("/archive/%s" % item_id, context)
    updates = {'embargo': (utcnow() - timedelta(minutes=1))}
    with context.app.test_request_context(context.app.config['URL_PREFIX']):
        get_resource_service('archive').system_update(id=item['_id'], original=item, updates=updates)
@then('we validate the published item expiry to be after publish expiry set in desk settings {publish_expiry_in_desk}')
def validate_published_item_expiry(context, publish_expiry_in_desk):
    """Check expiry of every returned item against the desk publish expiry (minutes)."""
    assert_200(context.response)
    try:
        response_data = json.loads(context.response.get_data())
    except Exception:
        fail_and_print_body(context.response, 'response is not valid json')
    if response_data.get('_meta') and response_data.get('_items'):
        for item in response_data.get('_items'):
            assert_expiry(context, item, publish_expiry_in_desk)
    else:
        assert_expiry(context, response_data, publish_expiry_in_desk)
def assert_expiry(context, item, publish_expiry_in_desk):
    """Assert *item*'s expiry honours the desk publish expiry (in minutes).

    With an embargo the expiry must equal embargo + expiry minutes exactly;
    without one it only has to be no later than now + expiry minutes.
    """
    embargo = item.get('embargo')
    actual = parse_date(item.get('expiry'))
    error_message = 'Published Item Expiry validation fails'
    publish_expiry_in_desk = int(publish_expiry_in_desk)
    if embargo:
        expected = get_expiry_date(minutes=publish_expiry_in_desk,
                                   offset=datetime.strptime(embargo, '%Y-%m-%dT%H:%M:%S%z'))
        if actual != expected:
            raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
    else:
        expected = get_expiry_date(minutes=publish_expiry_in_desk)
        if expected < actual:
            raise WooperAssertionError("{}. Expected: {}, Actual: {}".format(error_message, expected, actual))
| amagdas/superdesk | server/features/steps/steps.py | Python | agpl-3.0 | 69,302 |
import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.table import Table
from matplotlib import pyplot as plt
import pdb
from frb.surveys import des
import sys,os
import progressbar as pb #pip install progressbar2
from matplotlib import pyplot as plt
import seaborn as sns
def get_catalog(coords,size=1*u.deg):
    """
    Download catalog objects within
    a square of input `size` centered
    around `coords`.
    Args:
        coords (astropy SkyCoord): central coordinates
        size (astropy Angle, optional): Size of the square FoV around
            the central coordinates
    Returns:
        catalog (astropy Table): DES DR1 search results
    """
    # query a circumscribing region, then trim to the requested square in ra/dec
    survey = des.DES_Survey(coords,size/np.sqrt(2))
    catalog = survey.get_catalog(print_query=False)
    select = (catalog['ra']>coords.ra.value-size.value/2)&(catalog['ra']<coords.ra.value+size.value/2)
    select = select*(catalog['dec']>coords.dec.value-size.value/2)&(catalog['dec']<coords.dec.value+size.value/2)
    catalog = catalog[select]
    return catalog
def _generate_coord_grid(coords,size=1*u.deg,resolution=3600):
    """
    Generate a uniform grid of SkyCoords centered around `coords`
    within an area `size` x `size`, with `resolution` points along
    each axis (default 3600; the old docstring said 1000, which did
    not match the signature).
    Args:
        coords (astropy SkyCoord): central coordinates
        size (astropy Angle, optional): Size of the square FoV around
            the central coordinates
        resolution (int, optional): number of grid points per axis
    Returns:
        SkyCoord: flattened grid of resolution**2 coordinates
    """
    ra,dec = coords.ra.value,coords.dec.value
    ra_arr = np.linspace(ra-size.value/2,ra+size.value/2,resolution)
    dec_arr = np.linspace(dec-size.value/2,dec+size.value/2,resolution)
    rr,dd = np.meshgrid(ra_arr,dec_arr)
    return SkyCoord(rr.ravel(),dd.ravel(),unit="deg")
def get_frac_within_sep(coords,catalog,sep=1*u.arcsec,resolution=1000,size=1*u.deg,band='r',crit_mag=22):
    """
    Obtain the fraction of sightlines on the uniform grid
    defined in _generate_coord_grid falling within `sep` distance
    of a galaxy in `catalog` with mag less than `crit_mag` in `band`.
    The catalog and grid are defined in a square centered around `coords`
    of side length `size` and grid has input linear `resolution`.
    """
    grid = _generate_coord_grid(coords,size,resolution)
    catalogcoord = SkyCoord(catalog['ra'],catalog['dec'],unit="deg")
    # nearest catalog object (index and on-sky separation) for each grid point
    idx, sep2d, _ = grid.match_to_catalog_sky(catalogcoord)
    newtab = Table()
    newtab['gridcoord'] = grid
    newtab['sep'] = sep2d
    newtab["DES_"+band] = catalog[idx]["DES_"+band]
    #compute frac
    select = (newtab["DES_"+band]<crit_mag)&(newtab['sep']<sep.to(u.deg).value)
    frac = np.sum(select)/len(grid)
    return frac
def random_sightlines(n=100,resolution=3600,sep=1*u.arcsec,size=1*u.deg,
                      band='r',crit_mag=22,outfile="random_sights.txt"):
    """
    Query a contiguous square patch of DES `n` times to obtain output
    from `get_frac_within_sep` and store it to `outfile`.
    """
    ra = 22.5 + np.random.rand(n)*45 #limit RA to [22.5deg,67.5deg]
    dec = -60 + np.random.rand(n)*30 #limit DEC to [-60deg,-30deg]
    rand_coords = SkyCoord(ra,dec,unit="deg")
    fracs = np.zeros(n)
    bar = pb.ProgressBar(max_value=n)
    bar.start()
    for num,coords in enumerate(rand_coords):
        catalog = get_catalog(coords,size)
        fracs[num] = get_frac_within_sep(coords,catalog,sep=sep,resolution=resolution,band=band,crit_mag=crit_mag)
        bar.update(num+1)
    #Save to file
    np.savetxt(outfile,fracs)
    return fracs
def plot_hist(fracs,bins=5,outfile="/home/sunil/Desktop/DES_fracs.png"):
    """
    Plot a histogram of fractions obtained from `random_sightlines`.
    Args:
        fracs (np.ndarray): per-patch fractions (0-1); plotted as percentages.
        bins (int, optional): number of histogram bins.
        outfile (str, optional): path the figure is saved to.  Parameterized
            so the plot is no longer pinned to one user's desktop; the
            default preserves the previous behaviour.
    """
    sns.set(font_scale=1.3,font="serif",style="ticks")
    sns.axes_style(rc={"weight":"bold"})
    fig,ax = plt.subplots(nrows=1,ncols=1,figsize=(10,8))
    ax.set_xlabel("Percentage of sightlines within 1'' of a galaxy (r<22)",labelpad=20,weight="bold")
    ax.set_ylabel("Number of DES patches (1 sq. deg)",labelpad=20,weight="bold")
    ax.hist(fracs*100,bins=bins,edgecolor="k",color="#366293")
    # mark the median of the distribution
    median_frac = np.median(fracs)*100
    ax.axvline(x=median_frac,linestyle="--",color="#262626")
    ax.annotate("Median = {:.3f}".format(median_frac),(median_frac+0.01,22),color="#262626")
    plt.savefig(outfile,dpi=300,bbox_inches="tight",pad_inches=0.1)
    plt.show()
"""
If you're running this for the first time or you
want to regenerate the database, uncomment the following line
and run.
"""
#fracs = random_sightlines(n=100)
fracs = np.loadtxt("random_sights.txt")
plot_hist(fracs,bins=9)
| FRBs/DM | frb/scripts/random_assoc.py | Python | bsd-3-clause | 4,628 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova import quota
from nova import utils
QUOTAS = quota.QUOTAS
def authorize_action(context, action_name):
    """Enforce the 'quotas:<action_name>' policy for the request context."""
    extensions.extension_authorizer('compute', 'quotas:%s' % action_name)(context)
class QuotaTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a quota_set response."""
    def construct(self):
        # <quota_set id="..."><resource>value</resource>...</quota_set>
        root = xmlutil.TemplateElement('quota_set', selector='quota_set')
        root.set('id')
        for resource in QUOTAS.resources:
            elem = xmlutil.SubTemplateElement(root, resource)
            elem.text = resource
        return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(object):
    """API controller exposing show, update and defaults for quota sets."""
    def _format_quota_set(self, project_id, quota_set):
        """Convert the quota object to a result dict"""
        result = dict(id=str(project_id))
        for resource in QUOTAS.resources:
            result[resource] = quota_set[resource]
        return dict(quota_set=result)
    def _validate_quota_limit(self, limit, remain, quota):
        """Raise HTTPBadRequest when a requested limit is out of range."""
        # NOTE: -1 is a flag value for unlimited
        if limit < -1:
            msg = _("Quota limit must be -1 or greater.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
        # Quota limit must be less than the remains of the project.
        if remain != -1 and remain < limit - quota:
            msg = _("Quota limit exceed the remains of the project.")
            raise webob.exc.HTTPBadRequest(explanation=msg)
    def _get_quotas(self, context, id, user_id=None, remaining=False,
                    usages=False):
        """Return quota values for a project, a user, or the remaining pool.

        With ``remaining`` the project's remaining quotas are returned as-is;
        with ``user_id`` the per-user quotas are used, otherwise the
        per-project quotas.  Unless ``usages`` is set, only the bare limits
        are returned.
        """
        # Get the remaining quotas for a project.
        if remaining:
            values = QUOTAS.get_remaining_quotas(context, id)
            return values
        if user_id:
            # If user_id, return quotas for the given user.
            values = QUOTAS.get_user_quotas(context, user_id, id,
                                            usages=usages)
        else:
            values = QUOTAS.get_project_quotas(context, id, usages=usages)
        if usages:
            return values
        else:
            return dict((k, v['limit']) for k, v in values.items())
    def _request_params(self, req):
        """Parse the request query string into a dict of value lists."""
        qs = req.environ.get('QUERY_STRING', '')
        return urlparse.parse_qs(qs)
    @wsgi.serializers(xml=QuotaTemplate)
    def show(self, req, id):
        """GET a quota set; supports ?remaining= and ?user_id= parameters."""
        context = req.environ['nova.context']
        authorize_action(context, 'show')
        params = self._request_params(req)
        remaining = False
        if 'remaining' in params:
            remaining = utils.bool_from_str(params["remaining"][0])
        user_id = None
        if 'user_id' in params:
            user_id = params["user_id"][0]
        try:
            sqlalchemy_api.authorize_project_context(context, id)
            return self._format_quota_set(id,
                self._get_quotas(context, id, user_id, remaining))
        except exception.NotAuthorized:
            raise webob.exc.HTTPForbidden()
    @wsgi.serializers(xml=QuotaTemplate)
    def update(self, req, id, body):
        """Update quota limits for project *id*, or for a user via ?user_id=."""
        context = req.environ['nova.context']
        params = self._request_params(req)
        project_id = id
        user_id = None
        remains = {}
        quotas = {}
        if 'user_id' in params:
            # Project admins are able to modify per-user quotas.
            authorize_action(context, 'update_for_user')
            user_id = params["user_id"][0]
            remains = self._get_quotas(context, project_id, remaining=True)
            quotas = db.quota_get_all_by_user(context, user_id, project_id)
        else:
            # Only admins are able to modify per-project quotas.
            authorize_action(context, 'update_for_project')
        for key in body['quota_set'].keys():
            if key in QUOTAS:
                value = int(body['quota_set'][key])
                try:
                    if user_id:
                        self._validate_quota_limit(value, remains.get(key, 0),
                                                   quotas.get(key, 0))
                        db.quota_update_for_user(context, user_id,
                                                 project_id, key, value)
                    else:
                        # remains is {} here, so the -1 fallback makes the
                        # remaining-quota check a no-op for project updates
                        self._validate_quota_limit(value, remains.get(key, -1),
                                                   quotas.get(key, 0))
                        db.quota_update(context, project_id, key, value)
                except exception.ProjectQuotaNotFound:
                    db.quota_create(context, project_id, key, value)
                except exception.UserQuotaNotFound:
                    db.quota_create_for_user(context, user_id,
                                             project_id, key, value)
                except exception.AdminRequired:
                    raise webob.exc.HTTPForbidden()
        return {'quota_set': self._get_quotas(context, id, user_id)}
    @wsgi.serializers(xml=QuotaTemplate)
    def defaults(self, req, id):
        """GET the default quota values formatted as a quota set."""
        context = req.environ['nova.context']
        authorize_action(context, 'show')
        return self._format_quota_set(id, QUOTAS.get_defaults(context))
class Quotas(extensions.ExtensionDescriptor):
    """Quotas management support"""
    name = "Quotas"
    alias = "os-quota-sets"
    namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
    updated = "2011-08-08T00:00:00+00:00"
    def get_resources(self):
        """Register /os-quota-sets with a member GET action 'defaults'."""
        resources = []
        res = extensions.ResourceExtension('os-quota-sets',
                                           QuotaSetsController(),
                                           member_actions={'defaults': 'GET'})
        resources.append(res)
        return resources
| NoBodyCam/TftpPxeBootBareMetal | nova/api/openstack/compute/contrib/quotas.py | Python | apache-2.0 | 6,594 |
# This Python file uses the following encoding: utf-8
""" Subject line.
Main text.
"""
from unittest import TestCase
import numpy as np
from classes.Factor import Factor
from classes.Variable import Variable
__author__ = 'Chao Li'
class TestFactor(TestCase):
    """Unit tests for Factor product, marginalization, summing-out and division."""
    def setUp(self):
        # Three discrete variables (id, name, states) shared by all tests.
        self.A = Variable(1, "A", ['a1', 'a2', 'a3'])
        self.B = Variable(5, "B", ['b1', 'b2'])
        # NOTE(review): second state is 'b2' -- looks like a typo for 'c2',
        # but the tests only depend on cardinality, so it is harmless here.
        self.C = Variable(3, "C", ['c1', 'b2'])
    def test___mul__(self):
        """
        References
        ----------
        D. Koller and N. Friedman (2009). Probabilistic Graphical Models: Principles and Techniques. edited by . MIT Press.
        page 107, Figure 4.3 An example of factor product
        """
        phi1_scope = [self.B, self.A]
        phi2_scope = [self.C, self.B]
        phi1_parameters = np.array([0.5, 0.8, 0.1, 0, 0.3, 0.9])
        phi2_parameters = np.array([0.5, 0.7, 0.1, 0.2])
        phi1 = Factor(phi1_scope, phi1_parameters)
        phi2 = Factor(phi2_scope, phi2_parameters)
        # Expected
        psi_scope = [self.C, self.B, self.A]
        psi_parameters = np.array([0.25, 0.35, 0.08, 0.16, 0.05, 0.07, 0., 0., 0.15, 0.21, 0.09, 0.18])
        psi = Factor(psi_scope, psi_parameters)
        # Actual
        results = phi1 * phi2
        results = results.reordered_variables(psi_scope)
        assert psi == results
    def test_marginalization(self):
        """
        References
        ----------
        D. Koller and N. Friedman (2009). Probabilistic Graphical Models: Principles and Techniques. edited by . MIT Press.
        page 297, Figure 9.7 Example of factor marginalization: summing out B.
        """
        psi_scope = [self.C, self.B, self.A]
        psi_parameters = np.array([0.25, 0.35, 0.08, 0.16, 0.05, 0.07, 0., 0., 0.15, 0.21, 0.09, 0.18])
        psi = Factor(psi_scope, psi_parameters)
        # Expected
        phi3_scope = [self.C, self.A]
        phi3_parameters = np.array([0.33, 0.51, 0.05, 0.07, 0.24, 0.39])
        phi3 = Factor(phi3_scope, phi3_parameters)
        # Actual
        results = psi.marginalization([self.C, self.A])
        results = results.reordered_variables(phi3_scope)
        assert phi3 == results
    def test_summing_out(self):
        # Same fixture as test_marginalization; summing out B must agree with
        # marginalizing onto [C, A].
        psi_scope = [self.C, self.B, self.A]
        psi_parameters = np.array([0.25, 0.35, 0.08, 0.16, 0.05, 0.07, 0., 0., 0.15, 0.21, 0.09, 0.18])
        psi = Factor(psi_scope, psi_parameters)
        # Expected
        phi3_scope = [self.C, self.A]
        phi3_parameters = np.array([0.33, 0.51, 0.05, 0.07, 0.24, 0.39])
        phi3 = Factor(phi3_scope, phi3_parameters)
        # Actual
        results = psi.summing_out(self.B)
        results = results.reordered_variables(phi3_scope)
        assert phi3 == results
    def test___truediv__(self):
        """
        References
        ----------
        D. Koller and N. Friedman (2009). Probabilistic Graphical Models: Principles and Techniques. edited by . MIT Press.
        page 365, Figure 10.7 An example of factor division
        """
        phi1_scope = [self.B, self.A]
        phi2_scope = [self.A]
        phi1_parameters = np.array([0.5, 0.2, 0., 0., 0.3, 0.45])
        phi2_parameters = np.array([0.8, 0., 0.6])
        phi1 = Factor(phi1_scope, phi1_parameters)
        phi2 = Factor(phi2_scope, phi2_parameters)
        # Expected
        psi_scope = [self.B, self.A]
        psi_parameters = np.array([0.625, 0.25, 0., 0., 0.5, 0.75])
        psi = Factor(psi_scope, psi_parameters)
        # Actual
        results = phi1 / phi2
        results = results.reordered_variables(psi_scope)
        assert psi == results
| chaoli314/openbn | classes/test_factor.py | Python | apache-2.0 | 3,632 |
'''
#/**
#* @@@ START COPYRIGHT @@@
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
#
#* @@@ END COPYRIGHT @@@
#*/
PYODBC TESTS
'''
import ConfigParser
import pyodbc
import sys
import unittest
# Module-level connection handle, (re)assigned by the test fixtures below.
cnxn = 0
# Connection settings come from ./config.ini, section [pytest].
Config = ConfigParser.ConfigParser()
Config.read("./config.ini")
dsn = Config.get("pytest", "dsn")
usr = Config.get("pytest", "usr")
pwd = Config.get("pytest", "pwd")
tcp = Config.get("pytest", "tcp")
catalog = Config.get("pytest", "catalog")
schema = Config.get("pytest", "schema")
class ConnectTest(unittest.TestCase):
    """Connection smoke tests: DSN and DSN-less pyodbc connects."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test11(self):
        """Connect using the configured DSN."""
        try:
            connect_str = 'DSN=' + dsn + ';UID=' + usr + ';PWD=' + pwd
            sys.stderr.write(connect_str + '\n')
            cnxn = pyodbc.connect(connect_str, autocommit=True)
            sys.stderr.write('ConnectTest.test11 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'DSN Connection in ConnectTest.test11 failed'
        else:
            cnxn.close()
    def test12(self):
        """Connect DSN-less using an explicit driver/server string."""
        try:
            cnxn = pyodbc.connect('Driver=Trafodion;Server=' + tcp + ';UID=' + usr + ';PWD=' + pwd + ';', autocommit=True)
            sys.stderr.write('ConnectTest.test12 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'DSNless connection in ConnectTest.test12 failed'
        else:
            cnxn.close()
class SQLTest(unittest.TestCase):
    """DDL/DML tests (create, select, update, delete, upsert) over pyodbc."""
    def setUp(self):
        """Open the shared connection and make the test schema current."""
        global cnxn
        connect_str = 'DSN=' + dsn + ';UID=' + usr + ';PWD=' + pwd + ';'
        try:
            cnxn = pyodbc.connect(connect_str, autocommit=True)
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'Failed to connect to odbc database.'
        else:
            try:
                cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
            except Exception:
                # schema probably exists already -- best effort
                pass
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
    def tearDown(self):
        global cnxn
        cnxn.close()
    def test21(self):
        """GET TABLES must list a freshly created table."""
        #global cnxn
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
        try:
            cnxn.execute('DROP TABLE IF EXISTS T21')
            cnxn.execute('CREATE TABLE T21 (C1 INT NOT NULL, C2 CHAR(10), PRIMARY KEY(C1))')
            cursor = cnxn.execute('GET TABLES')
            found = 0
            while 1:
                row = cursor.fetchone()
                if not row:
                    break
                if (row[0] == 'T21'):
                    found = 1
            assert found == 1, 'T21 should be listed in the output'
            sys.stderr.write('SQLTest.test21 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test21 failed.'
    def test22(self):
        """Fetch a row and read a column by position."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS EMP')
            cursor.execute('CREATE TABLE EMP (EMPNUM INT NOT NULL, EMPNAME VARCHAR(20), PRIMARY KEY(EMPNUM))')
            cursor.execute('INSERT INTO EMP VALUES (20001, \'VITTAL RAO\')')
            cursor.execute('SELECT * FROM EMP')
            found = 0
            while 1:
                row = cursor.fetchone()
                if not row:
                    break
                if (row[1] == 'VITTAL RAO'):
                    found = 1
            assert found == 1, 'Fetching data using column number failed'
            sys.stderr.write('SQLTest.test22 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test22 failed.'
    def test23(self):
        """Fetch a row and read a column by attribute name."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS EMP')
            cursor.execute('CREATE TABLE EMP (EMPNUM INT NOT NULL, EMPNAME VARCHAR(20), PRIMARY KEY(EMPNUM))')
            cursor.execute('INSERT INTO EMP VALUES (20001, \'VITTAL RAO\')')
            cursor.execute('SELECT * FROM EMP')
            found = 0
            while 1:
                row = cursor.fetchone()
                if not row:
                    break
                if (row.EMPNAME == 'VITTAL RAO'):
                    found = 1
            assert found == 1, 'Fetching data using column name failed'
            sys.stderr.write('SQLTest.test23 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test23 failed.'
    def test24(self):
        """fetchall must return ordered integer data intact."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T24')
            cursor.execute('CREATE TABLE T24(C INT)')
            cursor.execute('INSERT INTO T24 VALUES (1), (-200), (3467), (0)')
            cursor.execute('SELECT * FROM T24 ORDER BY 1')
            rows = cursor.fetchall()
            l = []
            for row in rows:
                l.append(row[0])
            assert l == [-200, 0, 1, 3467], 'Integer data not returned correctly'
            sys.stderr.write('SQLTest.test24 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test24 failed.'
    def test25(self):
        """SELECT with a bound parameter in the WHERE clause."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T25')
            cursor.execute('CREATE TABLE T25(C INT)')
            cursor.execute('INSERT INTO T25 VALUES (1), (-200), (3467), (0)')
            x = 200
            cursor.execute('SELECT * FROM T25 WHERE C > ? ORDER BY 1', x)
            rows = cursor.fetchall()
            l = []
            for row in rows:
                l.append(row[0])
            assert l == [3467], 'Integer data not returned correctly'
            sys.stderr.write('SQLTest.test25 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test25 failed.'
    def test26(self):
        """rowcount after an unqualified DELETE."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T26')
            cursor.execute('CREATE TABLE T26(C INT)')
            cursor.execute('INSERT INTO T26 VALUES (1), (-200), (3467), (0)')
            cursor.execute('DELETE FROM T26')
            assert cursor.rowcount == 4, 'Number of deleted rows must be 4.'
            sys.stderr.write('SQLTest.test26 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test26 failed.'
    def test27(self):
        """rowcount after a parameterized DELETE."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T27')
            cursor.execute('CREATE TABLE T27(C INT)')
            cursor.execute('INSERT INTO T27 VALUES (1), (-200), (3467), (0)')
            x = 200
            assert cursor.execute('DELETE FROM T27 WHERE C > ?', x).rowcount == 1, 'Number of deleted rows must be 1.'
            sys.stderr.write('SQLTest.test27 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test27 failed.'
    def test28(self):
        """Parameterized UPDATE plus verification of the resulting data."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T28')
            cursor.execute('CREATE TABLE T28(C INT)')
            cursor.execute('INSERT INTO T28 VALUES (1), (-200), (3467), (0)')
            x = 0
            assert cursor.execute("UPDATE T28 SET C = 200 WHERE C = ?", x).rowcount == 1, 'Number of updated rows must be 1.'
            cursor.execute("SELECT * FROM T28 ORDER BY 1")
            rows = cursor.fetchall()
            l = []
            for row in rows:
                l.append(row[0])
            assert l == [-200, 1, 200, 3467], 'Integer data not returned correctly'
            sys.stderr.write('SQLTest.test28 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test28 failed.'
    def test29(self):
        """UPSERT must overwrite every existing row's C2 with 'xyz'."""
        global cnxn
        cursor = cnxn.cursor()
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cursor.execute('DROP TABLE IF EXISTS T29')
            cursor.execute("CREATE TABLE T29(C1 INT NOT NULL, C2 CHAR(10), PRIMARY KEY(C1))")
            cursor.execute("INSERT INTO T29 VALUES (1, 'abc'), (-200, 'xyz'), (3467, 'pqr')")
            cursor.execute("UPSERT INTO T29 VALUES (1, 'xyz'), (-200, 'xyz'), (3467, 'xyz')")
            cursor.execute("SELECT C2 FROM T29")
            found = 0
            while 1:
                row = cursor.fetchone()
                if not row:
                    break
                if (row.C2 != 'xyz'):
                    # any row left untouched means the upsert failed
                    found = 1
            assert found == 1, 'Upsert failed'
            sys.stderr.write('SQLTest.test29 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test29 failed.'
class DataTest(unittest.TestCase):
    """Data-type coverage: insert a row exercising many SQL column types."""
    def setUp(self):
        """Open the shared connection and make the test schema current."""
        global cnxn
        connect_str = 'DSN=' + dsn + ';UID=' + usr + ';PWD=' + pwd + ';'
        cnxn = pyodbc.connect(connect_str, autocommit=True)
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
    def tearDown(self):
        global cnxn
        cnxn.close()
    def test31(self):
        """Create TDATA covering numeric/char/datetime/interval types and insert one row."""
        try:
            cnxn.execute('CREATE SCHEMA ' + catalog + '.' + schema + ';')
        except Exception:
            pass
        try:
            cnxn.execute('SET SCHEMA ' + catalog + '.' + schema + ';')
            cnxn.execute('DROP TABLE IF EXISTS TDATA')
            cnxn.execute("""
                CREATE TABLE TDATA (
                C1 INT NOT NULL, C2 CHAR(10), C3 VARCHAR(1000),
                C4 DATE, C5 TIME, C6 TIMESTAMP,
                C7 INTERVAL YEAR TO MONTH,
                C8 SMALLINT UNSIGNED, C9 LARGEINT, C10 DECIMAL,
                C11 FLOAT, C12 REAL, C13 DOUBLE PRECISION, C14 NUMERIC (9,3),
                PRIMARY KEY(C1))
            """)
            cnxn.execute("""
                INSERT INTO TDATA VALUES (
                1, 'whatever', 'anything goes',
                DATE '2001-03-22',TIME '13:40:30.666666',TIMESTAMP '1997-09-03 09:33:30.555555',
                INTERVAL '4-5' YEAR TO MONTH,
                8, 999999, 10.23,
                0.025, 123.456, 12345.67890, 9876.32)
            """)
            sys.stderr.write('SQLTest.test31 passed' + '\n')
        except Exception, e:
            sys.stderr.write(str(e) + '\n')
            assert 0, 'SQLTest.test31 failed.'
if __name__ == "__main__":
    # Run the three suites in sequence with verbose output.
    for test_case in (ConnectTest, SQLTest, DataTest):
        suite = unittest.TestLoader().loadTestsFromTestCase(test_case)
        unittest.TextTestRunner(verbosity=2).run(suite)
| robertamarton/incubator-trafodion | dcs/src/test/pytests/test_p2.py | Python | apache-2.0 | 13,579 |
import threading, time
from httplib2 import Http
from urllib.parse import urlencode
from queue import Queue
from utils import tools, env
from utils.log import logger
# MASTER_IPS entries look like "user@host"; keep only the host and attach
# the master port, yielding "host:port" endpoints.
masterips = env.getenv("MASTER_IPS").split(",")
G_masterips = [ip.split("@")[0] + ":" + str(env.getenv("MASTER_PORT"))
               for ip in masterips]
# send http request to master
def request_master(url, data):
    """POST form-encoded *data* to *url* on every known master; log replies."""
    global G_masterips
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    client = Http()
    for master in G_masterips:
        resp, content = client.request("http://" + master + url, "POST",
                                       urlencode(data), headers=headers)
        logger.info("response from master:" + content.decode('utf-8'))
class StopAllReqMgr(threading.Thread):
    """Background worker that stops all vclusters of queued users.

    Usernames are pushed via add_request(); run() drains the queue and asks
    the master(s) to stop every vcluster of each user (used when a user's
    beans drop to zero or below).
    """

    def __init__(self, maxsize=100, interval=1):
        threading.Thread.__init__(self)
        self.thread_stop = False
        # Bug fix: the interval argument used to be ignored (hard-coded 1).
        self.interval = interval
        self.q = Queue(maxsize=maxsize)

    def add_request(self, username):
        # Blocks when the queue is full.
        self.q.put(username)

    def run(self):
        while not self.thread_stop:
            # Blocks until a username is available.
            username = self.q.get()
            logger.info("The beans of User(" + str(username) + ") are less than or equal to zero, all his or her vclusters will be stopped.")
            auth_key = env.getenv('AUTH_KEY')
            form = {'username': username, 'auth_key': auth_key}
            request_master("/cluster/stopall/", form)
            self.q.task_done()
            time.sleep(self.interval)

    def stop(self):
        # Only takes effect after the current (blocking) q.get() returns.
        self.thread_stop = True
        return
| FirmlyReality/docklet | user/stopreqmgr.py | Python | bsd-3-clause | 1,591 |
import unittest
import copy
import json
import os
from mock import patch, MagicMock
from common import RESOURCE_3, SQS_MESSAGE_5
from c7n_mailer.slack_delivery import SlackDelivery
from c7n_mailer.email_delivery import EmailDelivery
# Dummy credentials/endpoint used by the tests below; no real Slack calls
# are made (requests.post is mocked).
SLACK_TOKEN = "slack-token"
SLACK_POST_MESSAGE_API = "https://slack.com/api/chat.postMessage"
class TestSlackDelivery(unittest.TestCase):
    """Unit tests for c7n_mailer's SlackDelivery.

    Covers routing of an SQS policy message to Slack channels, resource-tag
    channels and webhooks, plus the HTTP send path (requests.post mocked).
    """

    def setUp(self):
        # Shared config: fake token plus template search paths.
        self.config = {
            'slack_token': SLACK_TOKEN,
            'templates_folders': [
                os.path.abspath(os.path.dirname(__file__)),
                os.path.abspath('/'),
                os.path.join(os.path.abspath(os.path.dirname(__file__)), "test-templates/")
            ]
        }
        self.session = MagicMock()
        self.logger = MagicMock()
        self.email_delivery = EmailDelivery(self.config, self.session, self.logger)
        # Deep copies so each test may mutate the shared fixtures freely.
        self.message = copy.deepcopy(SQS_MESSAGE_5)
        self.resource = copy.deepcopy(RESOURCE_3)
        self.message['resources'] = [self.resource]
        self.target_channel = 'test-channel'

    def test_map_sending_to_channel(self):
        # Default fixture routes to a plain channel name.
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        assert self.target_channel in result
        assert json.loads(result[self.target_channel])['channel'] == self.target_channel

    def test_map_sending_to_tag_channel_with_hash(self):
        # slack://tag/<TagName> routing; tag value already starts with '#'.
        self.target_channel = '#tag-channel'
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        message_destination = ['slack://tag/SlackChannel']
        self.resource['Tags'].append({"Key": "SlackChannel", "Value": self.target_channel})
        self.message['action']['to'] = message_destination
        self.message['policy']['actions'][1]['to'] = message_destination
        result = slack.get_to_addrs_slack_messages_map(self.message)
        assert self.target_channel in result
        assert json.loads(result[self.target_channel])['channel'] == self.target_channel
        self.logger.debug.assert_called_with("Generating message for specified Slack channel.")

    def test_map_sending_to_tag_channel_without_hash(self):
        # Tag value without '#': delivery is expected to prepend it.
        self.target_channel = 'tag-channel'
        channel_name = "#" + self.target_channel
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        message_destination = ['slack://tag/SlackChannel']
        self.resource['Tags'].append({"Key": "SlackChannel", "Value": self.target_channel})
        self.message['action']['to'] = message_destination
        self.message['policy']['actions'][1]['to'] = message_destination
        result = slack.get_to_addrs_slack_messages_map(self.message)
        assert channel_name in result
        assert json.loads(result[channel_name])['channel'] == channel_name
        self.logger.debug.assert_called_with("Generating message for specified Slack channel.")

    def test_map_sending_to_tag_channel_no_tag(self):
        # Resource lacks the SlackChannel tag: nothing should be routed.
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        message_destination = ['slack://tag/SlackChannel']
        self.message['action']['to'] = message_destination
        self.message['policy']['actions'][1]['to'] = message_destination
        result = slack.get_to_addrs_slack_messages_map(self.message)
        assert result == {}
        self.logger.debug.assert_called_with("No SlackChannel tag found in resource.")

    def test_map_sending_to_webhook(self):
        # Webhook destinations carry no 'channel' field in the payload.
        webhook = "https://hooks.slack.com/this-is-a-webhook"
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        message_destination = [webhook]
        self.message['action']['to'] = message_destination
        self.message['policy']['actions'][1]['to'] = message_destination
        result = slack.get_to_addrs_slack_messages_map(self.message)
        assert webhook in result
        assert 'channel' not in json.loads(result[webhook])

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_slack_handler(self, mock_post):
        # Happy path through slack_handler: one message, one log line.
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {'ok': True}
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.slack_handler(self.message, result)
        self.logger.info.assert_called_with("Sending account:core-services-dev "
                                            "policy:ebs-mark-unattached-deletion ebs:1 slack:slack"
                                            "_default to test-channel")

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_send_slack_msg_webhook(self, mock_post):
        # Webhook sends POST straight to the hook URL with the raw payload.
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {'ok': True}
        webhook = "https://hooks.slack.com/this-is-a-webhook"
        message_destination = [webhook]
        self.message['action']['to'] = message_destination
        self.message['policy']['actions'][1]['to'] = message_destination
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.send_slack_msg(webhook, result[webhook])
        args, kwargs = mock_post.call_args
        assert webhook == kwargs['url']
        assert kwargs['data'] == result[webhook]

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_send_slack_msg(self, mock_post):
        # Channel sends go through the chat.postMessage API endpoint.
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {'ok': True}
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.send_slack_msg(self.target_channel, result[self.target_channel])
        args, kwargs = mock_post.call_args
        assert self.target_channel == json.loads(kwargs['data'])['channel']
        assert SLACK_POST_MESSAGE_API == kwargs['url']
        assert kwargs['data'] == result[self.target_channel]

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_send_slack_msg_retry_after(self, mock_post):
        # HTTP 429 with Retry-After must be honored and logged.
        retry_after_delay = 1
        mock_post.return_value.status_code = 429
        mock_post.return_value.headers = {'Retry-After': retry_after_delay}
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.send_slack_msg(self.target_channel, result[self.target_channel])
        args, kwargs = mock_post.call_args
        self.logger.info.assert_called_with("Slack API rate limiting. Waiting %d seconds",
                                            retry_after_delay)

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_send_slack_msg_not_200_response(self, mock_post):
        # Non-200 status is logged with the response text.
        mock_post.return_value.status_code = 404
        mock_post.return_value.text = "channel_not_found"
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.send_slack_msg(self.target_channel, result[self.target_channel])
        self.logger.info.assert_called_with('Error in sending Slack message status:%s response: %s',
                                            404, 'channel_not_found')

    @patch('c7n_mailer.slack_delivery.requests.post')
    def test_send_slack_msg_not_ok_response(self, mock_post):
        # HTTP 200 with {'ok': False} is still an API-level failure.
        mock_post.return_value.status_code = 200
        mock_post.return_value.json.return_value = {'ok': False, 'error': "failed"}
        slack = SlackDelivery(self.config, self.logger, self.email_delivery)
        result = slack.get_to_addrs_slack_messages_map(self.message)
        slack.send_slack_msg(self.target_channel, result[self.target_channel])
        self.logger.info.assert_called_with('Error in sending Slack message. Status:%s, '
                                            'response:%s', 200, 'failed')
| capitalone/cloud-custodian | tools/c7n_mailer/tests/test_slack.py | Python | apache-2.0 | 8,070 |
import codegen
import dataset
import utils.backend as backends_common
def _casekey(size, kind):
return size + '_' + kind
def gen_code(backend, size, kind):
key = _casekey(size, kind)
if key in backend.case_to_code:
return backend.case_to_code[key]
raise RuntimeError('Unrecognized configuration: {}, {}'.format(size, kind))
def create(args):
    """Instantiate the benchmark backend selected by ``args.backend``.

    The returned backend object carries a ``case_to_code`` map from
    '<size>_<kind>' keys to the generated benchmark source for that
    configuration.  Raises RuntimeError for an unknown backend.
    """
    # Was three near-identical branches, each building the same 8-entry
    # table by hand; the table is now derived from a (class, prefix) spec.
    specs = {
        backends_common.SLTBENCH: (backends_common.BackendSLTBench, 'SLTBENCH'),
        backends_common.GOOGLEBENCH: (backends_common.BackendGooglebench, 'GOOGLEBENCH'),
        backends_common.NONIUS: (backends_common.BackendNonius, 'NONIUS'),
    }
    if args.backend not in specs:
        raise RuntimeError('Unrecognized backend: {}'.format(args.backend))
    backend_cls, prefix = specs[args.backend]
    rv = backend_cls(install_path=args.backend_install_path)
    # Explicit (constant, codegen-suffix) pairs: do not assume the dataset
    # constants spell their own suffixes.
    sizes = [(dataset.SIZE_MICRO, 'MICRO'), (dataset.SIZE_SMALL, 'SMALL'),
             (dataset.SIZE_MEDIUM, 'MEDIUM'), (dataset.SIZE_LARGE, 'LARGE')]
    kinds = [(dataset.KIND_MATH, 'MATH'), (dataset.KIND_MALLOC, 'MALLOC')]
    rv.case_to_code = {
        _casekey(size, kind):
            getattr(codegen, '{}_TEST_{}_{}'.format(prefix, kind_sfx, size_sfx))
        for size, size_sfx in sizes
        for kind, kind_sfx in kinds
    }
    return rv
| ivafanas/sltbench | tools/metrics/benchbench/impl/backend.py | Python | apache-2.0 | 3,383 |
# -*- coding: utf-8 -*-
"""
Created on 11 janv. 2012
@author: Xavier HINAUT
xavier.hinaut #/at\# inserm.fr
"""
import mdp
import numpy as np
import random
import reservoir
import sys
def write3DArrays(data, fileToCreate):
    """Write a list of 2D numpy arrays to a text file, slice by slice.

    The header line '# <n_slices> <rows> <cols>' and the per-slice
    '# <slice_nb>' markers start with '#' so numpy.loadtxt ignores them.
    Does nothing (creates no file) when *data* is empty.
    """
    if len(data) > 0:
        # Fixed: the removed Python-2-only file() builtin is now open(),
        # which works on both Python 2 and 3.
        with open(fileToCreate, 'w') as outfile:
            outfile.write('# {0} {1} {2}\n' .format(len(data), data[0].shape[0], data[0].shape[1]))
            # Iterating yields the 2D slices (equivalent to data[i, :, :]).
            for slice_nb, data_slice in enumerate(data):
                # Slice separator, then the values at full precision.
                outfile.write('# {0} \n' .format(slice_nb))
                np.savetxt(outfile, data_slice, '%.10e')
### Extraction Methods ###
##########################
def extr_sent(sent):
    """Split a sentence string into its words.

    Leading/trailing whitespace is stripped first; words are assumed to be
    separated by single spaces.  Raises Exception on an empty sentence.
    """
    sent = sent.strip()  # removing spaces before and after the sentence
    if len(sent) == 0:
        # Fixed Python-2-only raise-statement syntax to the call form.
        raise Exception("No words in sentence.")
    return sent.split(' ')
def extr_meaning(meaning, verbose=False):
    """Parse a meaning spec "<rel1>[, <rel2>] <o> <assignement> <o>".

    Returns (m_res, a_res): m_res is a list of one or two relations (each a
    list of words) and a_res is a one-element list with the assignement
    string.  Raises Exception when a relation fails the length check below.
    """
    m_res = []
    a_res = []
    meaning = meaning.strip()
    # Chunks are delimited by "<o>"; the trailing empty chunk is dropped.
    meaning = meaning.split("<o>")[:-1]
    (m1, x, m2) = meaning[0].partition(',')
    assignement = meaning[1]
    m1 = m1.strip()
    m2 = m2.strip()
    assignement = assignement.strip()
    # NOTE(review): this tests the CHARACTER length of m1, not its word
    # count, and the accepted range looks inverted (length 3..4 is what gets
    # rejected).  Kept as-is to preserve behaviour -- TODO confirm intent.
    if len(m1) < 3 or len(m1) > 4:
        m_res.append(m1.split())
    else:
        # Fixed Python-2-only raise-statement syntax to the call form.
        raise Exception("Number of words not good for 1st meaning: " + str(len(m1)))
    if m2 != '':
        if len(m2) < 3 or len(m2) > 4:
            m_res.append(m2.split())
        else:
            # Fixed copy-paste bug: the message reported "1st meaning" and
            # len(m1) although this branch checks the 2nd meaning.
            raise Exception("Number of words not good for 2nd meaning: " + str(len(m2)))
    a_res.append(assignement)
    return m_res, a_res
def extract_line_train(l):
    """Parse one <train data> line of the form "meaning;sentence".

    Returns (s_res, m_res, a_res): the sentence as a word list, the list of
    meanings (each a [focus relation obj(s)] word list, one or two of them),
    and the one-element list holding the assignement string.
    """
    # ex line: "guitar over violin <o> [...] <o>;the guitar over the violin is"
    meaning_part, _, sentence_part = l.partition(';')
    # meaning first (may raise), then the sentence
    m_res, a_res = extr_meaning(meaning_part, verbose=True)
    s_res = extr_sent(sent=sentence_part)
    return (s_res, m_res, a_res)
def extract_line_test(l, sentence_to_meaning=False):
    """Parse one line from the <test data> section.

    With sentence_to_meaning=True the line is a sentence and its word list
    is returned.  Otherwise the line must contain an "<o>"-delimited APOR
    structure and a (meaning, assignement) tuple is returned.
    Raises Exception on a stray ';' or a missing APOR structure.
    """
    if ';' in l:
        # Fixed Python-2-only raise-statement syntax to the call form.
        raise Exception("Ambiguous line: there should be no ';' because it is a line in <test> ... </test>")
    if sentence_to_meaning:
        return extr_sent(sent=l)
    else:
        if "<o>" in l:
            l = l.strip()
            sentence, assignement = extr_meaning(meaning=l, verbose=True)
            return (sentence, assignement)  # assignement is currently replacing canonical information
        else:
            raise Exception("APOR structure missing")
def extract_data_io(path_file, sentence_to_meaning=False, verbose=True):
    """Parse a corpus file into train and test sets.

    The file may contain '#' comments and delimits its sections with
    <train data>...</train data> and <test data>...</test data> markers.

    Returns [train, test, sent_form_info_train, sent_form_info_test]:
      train: list of (sentence, meanings) tuples,
      test: list of meanings (or of sentences, see NOTE below),
      sent_form_info_*: the structure/assignement string of each line.
    Raises Exception on malformed section nesting.
    """
    flag_train = False
    flag_test = False
    train = []
    test = []
    sent_form_info_train = []
    sent_form_info_test = []
    # Read everything up front so the file is closed immediately.
    with open(path_file, "r") as f:
        corpus = f.readlines()
    for line in corpus:
        # remove commentaries
        (line_tmp, _, _) = line.partition('#')
        # remove useless spaces
        line_tmp = line_tmp.strip()
        if line_tmp == '':
            pass
        elif line_tmp[:12] == '<train data>':
            if flag_test:
                # Fixed Python-2-only raise-statement syntax throughout.
                raise Exception("Entering <train data>, but <test data> is not finished. Line concerned: /n" + line)
            flag_train = True
        elif line_tmp[:13] == '</train data>':
            if flag_test:
                raise Exception("Found </train data>, but <test data> is not finished. Line concerned: /n" + line)
            flag_train = False
        elif line_tmp[:11] == '<test data>':
            if flag_train:
                raise Exception("Entering <test data>, but <train data> is not finished. Line concerned: /n" + line)
            flag_test = True
        elif line_tmp[:12] == '</test data>':
            if flag_train:
                raise Exception("Found </test data>, but <train data> is not finished. Line concerned: /n" + line)
            flag_test = False
        else:
            if flag_train:
                x = extract_line_train(l=line_tmp)
                train.append(x[0:2])           # (sentence, meanings)
                sent_form_info_train.append(x[2][0])
            elif flag_test:
                y = extract_line_test(l=line_tmp, sentence_to_meaning=sentence_to_meaning)
                # NOTE(review): non-tuple results (sentence mode) are never
                # appended to `test` -- kept as in the original; confirm intent.
                if type(y) is tuple:
                    assignement = y[1][0]
                    meaning = y[0]
                    test.append(meaning)
                    sent_form_info_test.append(assignement)
    return [train, test, sent_form_info_train, sent_form_info_test]
### Sentence & Meanning Methods ###
###################################
def txt2corpus_and_meaning(train_txt):
    """Split a list of (sentence, meaning) pairs into two parallel lists."""
    train_corpus = [sentence for (sentence, meaning) in train_txt]
    train_meaning = [meaning for (sentence, meaning) in train_txt]
    return train_corpus, train_meaning
def get_closed_class_words():
    """Return the closed class words for the current (Japanese) corpus.

    The appropriate list depends on the corpus in use; this one contains
    Japanese particles plus 'sore'.
    """
    return ['-ga', '-ni', '-wo', '-yotte', '-o', '-te', 'sore']
def extrac_open_class_words(l_sent, _OCW, l_closed_class=get_closed_class_words()):
    """Replace every open class word of *l_sent* by *_OCW*.

    Words not in *l_closed_class* are considered open class (nouns/verbs).
    When _OCW is '_' the caller treats the slot as a pause in the stimulus
    (no dedicated input signal).

    Returns (ocw_array, l_sent_ccw): the open class words in order of
    appearance, and the sentence with them substituted by _OCW (cf. Dominey,
    Hoen, Inui 2006, JoCN 18:12).
    """
    ocw_array = []
    l_sent_ccw = []
    for word in l_sent:
        if word in l_closed_class:
            l_sent_ccw.append(word)
        else:
            ocw_array.append(word)
            l_sent_ccw.append(_OCW)
    return (ocw_array, l_sent_ccw)
#COLAS : modified function for the recoding of the meaning
def get_and_remove_ocw_in_corpus(corpus, _OCW, _CCW):
    """Substitute _OCW for the open class words of every sentence in *corpus*.

    Returns (new_corpus, construction_words): the rewritten sentences and the
    construction vocabulary (_CCW, extended with _OCW unless _OCW is '_').
    """
    new_corpus = [
        extrac_open_class_words(l_sent=s, _OCW=_OCW, l_closed_class=_CCW)[1]
        for s in corpus
    ]
    if _OCW == '_':
        construction_words = _CCW
    else:
        construction_words = _CCW + [_OCW]
    return (new_corpus, construction_words)
def is_nr_ocw_in_construction_ok(construction, ocw_array, _OCW):
    """True iff *construction* has exactly one _OCW slot per word of
    *ocw_array*; False on any mismatch."""
    return construction.count(_OCW) == len(ocw_array)
def attribute_ocw_to_constructions(l_constructions, l_ocw_array, _OCW):
    """Fill the _OCW slots of each construction with its open class words.

    When a construction has more slots than available words, the shortfall
    is padded with the '_X_' placeholder; surplus words are ignored.
    Returns the list of reconstructed sentences.
    """
    l_sent = []
    for idx_c, construction in enumerate(l_constructions):
        pool = list(l_ocw_array[idx_c])
        if not is_nr_ocw_in_construction_ok(construction=construction, ocw_array=pool, _OCW=_OCW):
            shortfall = construction.count(_OCW) - len(pool)
            if shortfall > 0:
                # not enough open class words: pad with placeholders
                pool.extend(['_X_'] * shortfall)
        # consume the pool front-to-back via reversed pop()
        pool.reverse()
        sent = []
        for token in construction:
            if token == _OCW:
                sent.append(pool.pop())
            else:
                sent.append(token)
        l_sent.append(sent)
    return l_sent
#COLAS: strucure_partition() and generate_l_ocw_array() replace the function generate_l_ocw_array_in_PAOR_order
def structure_partition(structure):
    """Split a "[...],[...]" role string at its first ']' into two lists of
    '-'-separated role tokens (brackets, commas and spaces stripped)."""
    first, _, second = structure.partition(']')
    return (first.strip(" [],").split('-'),
            second.strip(" [],").split('-'))
def generate_l_ocw_array(tab_structure, tab_meaning, sLetters, sValues):
    """Build, for each (structure, meaning) pair, the open-class-word array
    ordered as dictated by the structure's role letters.

    tab_structure: list of "[...],[...]" role strings (see structure_partition).
    tab_meaning: list of meanings; each meaning is a list of one or two
        relations (each a list of words).
    sLetters / sValues: parallel arrays mapping a role letter (e.g. 'P') to
        the index of the corresponding word inside a relation.
    Returns the list of per-sentence OCW arrays.
    """
    l_ocw_array=[]
    for structure, meaning in zip(tab_structure, tab_meaning):
        ocw_array=[]
        #meaning extraction for each sentence
        meaning1=meaning[0]
        # NOTE(review): meaning2 is only bound when a second relation exists;
        # a two-part structure paired with a one-relation meaning would hit a
        # NameError below -- presumably the corpus guarantees consistency.
        if len(meaning)>1: meaning2=meaning[1]
        #structure extraction for each sentence
        structure1, structure2 = structure_partition(structure)
        #correspondance between words of the meaning and structure
        #cor={'P':0, 'A':1, 'O':2, 'R':3}
        #cor={'P':0, 'A':1, 'O':2, 'R':3, 'Q':0} # TEST
        # convert string array to int array
        sValuesInt = [int(i) for i in sValues]
        # transform structure letters array and values array in a dictionnary
        cor = dict(zip(sLetters,sValuesInt))
        # ['']-style second part means a simple (one-relation) sentence
        if len(structure2)==1:
            for role1 in structure1:
                if role1!='_': ocw_array.append(meaning1[cor[role1]])
        else:
            # complex sentence: walk both role lists in lockstep; role1 wins
            # when both slots are filled
            for role1, role2 in zip(structure1, structure2):
                if role1!='_':
                    ocw_array.append(meaning1[cor[role1]])
                elif role2!='_':
                    ocw_array.append(meaning2[cor[role2]])
        l_ocw_array.append(ocw_array)
    return l_ocw_array
#def get_meaning_coding(max_nr_ocw=8, max_nr_actionrelation=2, elt_pred=['P','A','O','R']):
def get_meaning_coding(max_nr_ocw=8, max_nr_actionrelation=2, elt_pred=['P','A','O','R','Q']): # TEST
    """Enumerate the meaning-coding labels '_<ocw>-<role><relation>'.

    One label per (open-class-word slot, predicate role, action/relation)
    combination, e.g. '_1-P1' for word 1 playing role P in relation 1.  With
    the defaults (8 OCWs, 2 relations, 4 roles per predicate -- predicate,
    focus, object1, object2 -- plus the extra 'Q') this yields the coding
    order used elsewhere, e.g.:
        below the guitar and right to the violin the trumpet is
        -> _1-P1 _2-O1 _3-P2 _4-O2 _5-F1&_5-F2
    """
    return ['_' + str(i) + '-' + str(elt_p) + str(j)
            for i in range(1, max_nr_ocw + 1)
            for j in range(1, max_nr_actionrelation + 1)
            for elt_p in elt_pred]
def is_there_several_time_the_same_elt_in_list(l):
    """Return False when every element of *l* is unique; otherwise return the
    elements that occur more than once (repetitions included)."""
    duplicated = [elt for elt in l if l.count(elt) > 1]
    if duplicated:
        return duplicated
    return False
#COLAS : function completely recoded
def generate_meaning_stim(l_structure, full_time, l_m_elt):
    """Build one constant meaning-stimulus matrix per structure string.

    l_structure: list of "[...],[...]" role strings (see structure_partition).
    full_time: number of time steps of each stimulus matrix.
    l_m_elt: ordered meaning-coding labels (see get_meaning_coding); the
        column index of a label is its position in this list.
    Returns (stim, l_meaning_code): for each structure a
    (full_time, len(l_m_elt)) array whose active columns are held at 1 for
    the whole duration, plus the list of activated label strings.
    """
    l_meaning_code = []
    l_indices_m_code = []
    stim = []
    for structure in l_structure:
        stim_seq = np.zeros((full_time,len(l_m_elt)))
        indices_m_code=[]
        meaning_code=[]
        structure1, structure2 = structure_partition(structure)
        if len(structure2)==1: #if structure2 is empty, i.e simple sentence
            for pos,role1 in enumerate(structure1):
                if role1!='_':
                    # label '_<word position>-<role>1' for relation 1
                    m_code = '_'+str(pos+1)+'-'+role1+'1'
                    idx = l_m_elt.index(m_code)
                    indices_m_code.append(idx)
                    # hold the column at 1 for the whole stimulus duration
                    stim_seq[:full_time,idx] = np.ones((full_time,1)).T
                    meaning_code.append(m_code)
        else: #if complex sentence
            for pos,role1, role2 in zip(range(len(structure1)), structure1, structure2):
                if role1!='_':
                    m_code = '_'+str(pos+1)+'-'+role1+'1'
                    idx = l_m_elt.index(m_code)
                    indices_m_code.append(idx)
                    stim_seq[:full_time,idx] = np.ones((full_time,1)).T
                    meaning_code.append(m_code)
                if role2!='_':
                    # same word position may also fill a role of relation 2
                    m_code = '_'+str(pos+1)+'-'+role2+'2'
                    idx = l_m_elt.index(m_code)
                    indices_m_code.append(idx)
                    stim_seq[:full_time,idx] = np.ones((full_time,1)).T
                    meaning_code.append(m_code)
        stim.append(stim_seq)
        l_meaning_code.append(meaning_code)
        l_indices_m_code.append(indices_m_code)
    # l_indices_m_code is computed but intentionally not returned
    return (stim, l_meaning_code)
### Teaching and testing methods ###
####################################
#COLAS : all the teaching and testing function have been deleted. The didn't serve anymore thanks to the new reservoir.
def convert_output_activity_in_signal_idx_max(out_act, thres, eps):
    """For each time step of *out_act* (time x signals) return the index of
    the strongest activation after thresholding.

    -1 means no signal exceeded the threshold at that step; -2 means the
    maximum was shared by at least two signals.
    """
    # Zero out every value at or below the threshold.
    thresholded = out_act * (out_act > thres)
    signal_indices_max = []
    for step in range(thresholded.shape[0]):
        row = thresholded[step]
        peak = np.max(row)
        if peak < eps:
            # everything was below threshold
            signal_indices_max.append(-1)
            continue
        values = list(row)
        idx = values.index(peak)
        values.remove(peak)
        if max(values) == peak:
            # the maximum is not unique
            idx = -2
        signal_indices_max.append(idx)
    return signal_indices_max
#COLAS: function slightly modified to solve the bug happenning when 2 open class words are juxtaposed
def convert_one_output_activity_in_construction(out_act, construction_words, min_nr_of_val_upper_thres=1):
    """Decode one output activity matrix into a sentence (list of words).

    Per-step winner indices (see convert_output_activity_in_signal_idx_max)
    are debounced: a new index must persist for min_nr_of_val_upper_thres
    steps before its word is emitted, which filters out single-step spikes.

    Inputs:
        - min_nr_of_val_upper_thres : number of values upper the threshold needed to take into account the word
            The default value is 1: this indicates that this parameters is useless,
            because 1 occurrence of an index is enough to add the corresponding word in the sentence.
            For instance if min_nr_of_val_upper_thres equals 2, it will not take into account singular
            pics into account.
    """
    # Each vector should be like this : stim_seq = np.zeros((full_time,len(construction_words)))
    # NOTE(review): threshold 0.4 and eps 1e-12 are hard-coded here.
    signal_indices_max = convert_output_activity_in_signal_idx_max(out_act, thres=0.4, eps = 1e-12)
    previous = -1                 # index last emitted (or skipped) as a word
    keep_in_merory = -1           # candidate index still waiting for confirmation
    nr_occurrence_same_index = 0  # confirmations seen for the candidate so far
    sent = []
    for i in range(len(signal_indices_max)):
        # only react when the winner changes w.r.t. the last accepted index
        if signal_indices_max[i]!=previous:
            # if the new signal was the same that the one kept in memory
            if signal_indices_max[i]==keep_in_merory:
                # increment the counter nr_occurrence_same_index
                nr_occurrence_same_index = nr_occurrence_same_index + 1
            # if we have to wait for more occurrences of this index to take it into account
            if (min_nr_of_val_upper_thres-1-nr_occurrence_same_index) > 0:
                # keep the index in memory
                keep_in_merory = signal_indices_max[i]
            else:
                # add the word corresponding to this index in the final sentence
                # (-1 = no active signal; -2 from the decoder would index from
                # the end -- presumably never emitted here, TODO confirm)
                if signal_indices_max[i]!=-1:
                    word = construction_words[signal_indices_max[i]]
                    sent.append(word)
                previous = signal_indices_max[i]
                # reinitialize temporary variables
                nr_occurrence_same_index = 0
                keep_in_merory = -1
    return sent
def convert_l_output_activity_in_construction(l_out_act, construction_words, min_nr_of_val_upper_thres=1):
    """Decode a list of output activity matrices into sentences (one list of
    words per matrix); see convert_one_output_activity_in_construction."""
    return [
        convert_one_output_activity_in_construction(
            out_act, construction_words,
            min_nr_of_val_upper_thres=min_nr_of_val_upper_thres)
        for out_act in l_out_act
    ]
#COLAS: treshold function to prevent the divergence of the reservoir (used with feedback)
def treshold_signal(vect, sup, inf):
    """Clamp every entry of *vect* into [inf, sup] in place; return vect."""
    for idx, value in enumerate(vect):
        if value >= sup:
            vect[idx] = sup
        elif value <= inf:
            vect[idx] = inf
    return vect
### Main Methods ###
##########################
#COLAS : function modified in order to give parameters in argument. New reservoir as well. Feedback implemented but it doesn't work for now (see report and readme)
def main(path_file_in, generate, ccw, sLetters, sValues):
def write_list_in_file(l, file=None, file_path=None, ccw= None, sLetters= None, sValues = None):
"""
Write a list in a file with with one item per line (like a one column csv).
If file is given, then it assumes the file is already open for writing.
If file_path is given, then it opens the file for writing, write the list, and then close the file.
"""
if file_path is not None:
if file is not None:
raise Exception, "Too much arguments. You must choose between file and file_path."
else:
file = open(file_path, 'ab')
if file is None:
raise Exception, "No file given in input."
for item in l:
file.write("%s\n" % item)
if file_path is not None:
file.close()
import io_language_coding as CtIolangcod
sentence_to_meaning = False
print ccw
# Definning parameters of stimulus (in a dictionary)
d = {}
d['act_time'] = 5
d['pause'] = True
d['suppl_pause_at_the_end'] = 1*d['act_time']
d['initial_pause'] = True
d['offset'] = False
## Random parameters
import time
millis = int(round(time.time() ))
seed = millis#2#4#2
# seed 2 works with 2 sentences : both with 1 relation, 1 Canonical, 1 Non-canonical
if seed is not None:
mdp.numx.random.seed(seed)
np.random.seed(seed)
[train_data_txt, test_data_txt, sent_form_info_train, sent_form_info_test] = extract_data_io(path_file=path_file_in, sentence_to_meaning=sentence_to_meaning)
#print "**************************"
train_corpus, train_meaning = txt2corpus_and_meaning(train_txt=train_data_txt)
if sentence_to_meaning:
test_corpus = test_data_txt
else:
test_meaning = test_data_txt
# making the list of constructions (refering to "construction grammar"), a construction is a sentence without its open class words (Nouns and Verbs)
(l_construction_train, construction_words) = get_and_remove_ocw_in_corpus(corpus=train_corpus, _OCW='X', _CCW = ccw)
#l_ocw_array_train=generate_l_ocw_array(sent_form_info_train, train_data_txt) même résultat
l_ocw_array_train=generate_l_ocw_array(sent_form_info_train, train_meaning, sLetters, sValues)
l_ocw_array_test=generate_l_ocw_array(sent_form_info_test, test_meaning, sLetters, sValues)
#print "**************************"
if sentence_to_meaning:
(l_construction_test, construction_words_test) = get_and_remove_ocw_in_corpus(corpus=test_corpus, _OCW='X', _CCW = ccw)
#print "l_construction_test", l_construction_test
if construction_words!=construction_words_test:
raise Exception, "The construction words are not the same for the train constructions and the test constructions. So the coding of sentences will be different and should provoque a future problem."
## Generating all the sentence stimulus (in order to have the same length for each sentence)
if sentence_to_meaning:
## Generate the stimulus input for train and test data
l_full_const = l_construction_train + l_construction_test
slice_test = slice(len(l_construction_train),len(l_construction_train)+len(l_construction_test))
else:
l_full_const = l_construction_train
slice_train = slice(0,len(l_construction_train))
(stim_full_data, l_full_offset) = CtIolangcod.generate_stim_input_nodic(l_data=l_full_const,
# act_time=d['act_time'], subset=None, l_input=None,
act_time=d['act_time'], subset=None, l_input=construction_words,
l_nr_word=None, mult=None, full_time=None,
with_offset=d['offset'], pause=d['pause'], initial_pause=d['initial_pause'],
suppl_pause_at_the_end=d['suppl_pause_at_the_end'], verbose=False)
stim_sent_train = stim_full_data[slice_train]
if sentence_to_meaning:
stim_sent_test = stim_full_data[slice_test]
l_m_elt = get_meaning_coding(8,2,sLetters) # test
if generate == "train":
#print "*** Generating meaning for train set ... ***"
(stim_mean_train, l_meaning_code_train) = generate_meaning_stim(l_structure=sent_form_info_train, full_time=stim_sent_train[0].shape[0], l_m_elt=l_m_elt)
write3DArrays(stim_mean_train, '../data/input/stim_mean_train.txt')
write3DArrays(stim_sent_train, '../data/input/stim_sent_train.txt')
if generate == "test":
if not sentence_to_meaning:
#print "*** Generating meaning for test set ... ***"
(stim_mean_test, l_meaning_code_test) = generate_meaning_stim(l_structure=sent_form_info_test, full_time=stim_sent_train[0].shape[0], l_m_elt=l_m_elt)
#print "*** ... meaning generated for test set ***"
#print ""
write3DArrays(stim_mean_test, '../data/input/stim_mean_test.txt')
import re
if __name__ == '__main__':
    #print 'Number of arguments:', len(sys.argv), 'arguments.'
    #print 'Argument List:', str(sys.argv)
    # Command line: <corpus file> <"train"|"test"> <ccw tokens joined by '_'>
    #               <structure tokens joined by '_'>
    # NOTE(review): relies on `sys` being imported earlier in this module -- confirm.
    corpusFilePath = sys.argv[1]
    # '$' is used as a space placeholder so paths survive shell argument splitting.
    corpusFilePath = corpusFilePath.replace('$',' ')
    ccwSplited = re.split('_', sys.argv[3])
    structureSplited = re.split('_', sys.argv[4])
    structureLetters = []
    structureValues = []
    for ii in structureSplited:
        # each structure token is one letter immediately followed by one value char
        structureLetters.append(ii[0])
        structureValues.append(ii[1])
    main(path_file_in= corpusFilePath, generate = sys.argv[2], ccw = ccwSplited, sLetters = structureLetters, sValues = structureValues)
| FlorianLance/neuron-computing-cuda | scripts/python/generate_stim.py | Python | lgpl-2.1 | 25,674 |
#!/bin/python3
import os, shutil
import xml.etree.ElementTree as ET
NAME = 'programmer_beop'
DESCRIPTION = 'Francais (Programmer Beop, ergonomique, derive de Bpo)'
LANGUAGE = 'fr'
def install_symbols():
    """Splice the custom layout into the system ``fr`` XKB symbols file.

    Copies ``/usr/share/X11/xkb/symbols/fr`` to a temp file while dropping
    any previously installed copy of this layout (the region between the
    ``// -- start <NAME> --`` and ``// -- end <NAME> --`` marker comments,
    markers included), appends the local ``./symbols`` layout, then swaps
    the temp file into place keeping a ``.bak`` backup of the original.
    """
    temp_file = './new_symbols'
    symbols_file = '/usr/share/X11/xkb/symbols/fr'
    symbols_layout = './symbols'
    write_no_edit = True  # False while inside a previously installed section
    with open(temp_file, 'w') as fout:
        # Clear old layout from symbols
        with open(symbols_file, 'r') as fr_keys:
            for line in fr_keys:
                if line.strip() == '// -- start {} --'.format(NAME):
                    write_no_edit = False  # stop copying; the marker line is dropped too
                if write_no_edit:
                    fout.write(line)
                if line.strip() == '// -- end {} --'.format(NAME):
                    write_no_edit = True  # resume copying after the old section
        with open(symbols_layout, 'r') as symbols:
            fout.write("\n")
            for line in symbols:
                fout.write(line)
    shutil.move(symbols_file, symbols_file + '.bak')  # keep a backup of the original
    shutil.move(temp_file, symbols_file)
def install_lst(lst_file):
    """Register the layout variant in an XKB ``.lst`` rules file.

    Inserts a ``NAME LANGUAGE: DESCRIPTION`` line immediately after the
    existing ``fr`` variants, unless an entry named ``NAME`` already
    appears before that point.  The original file is preserved as
    ``<lst_file>.bak``.

    Bug fix: the new entry is now written exactly once.  Previously the
    insertion condition stayed true for every later non-``fr:`` line, so
    the entry was duplicated before each remaining line of the list.
    """
    temp_file = './new_lst.lst'
    already_installed = False
    found_language = False
    inserted = False  # latch: ensure the entry is written only once
    with open(temp_file, 'w') as fout:
        with open(lst_file, 'r') as fin:
            for line in fin:
                fields = line.split()
                if len(fields) > 1 and fields[1] == '{}:'.format(LANGUAGE):
                    found_language = True
                if len(fields) > 0 and fields[0] == NAME:
                    already_installed = True
                # First line after the fr block: insert our entry before it.
                if (len(fields) > 1 and fields[1] != '{}:'.format(LANGUAGE)
                        and found_language and not already_installed
                        and not inserted):
                    fout.write(" {name} {language}: {description}\n".format(
                        name=NAME,
                        language=LANGUAGE,
                        description=DESCRIPTION))
                    inserted = True
                fout.write(line)
    shutil.move(lst_file, lst_file + '.bak')
    shutil.move(temp_file, lst_file)
def install_xml(xml_file):
    """Register the layout variant in an XKB ``.xml`` rules file.

    Adds a ``<variant>`` element for ``NAME`` under the ``fr`` layout's
    variant list unless one is already present, while preserving the
    file's first two lines (XML declaration + doctype) which ElementTree
    would otherwise drop.  The original file is kept as ``<xml_file>.bak``.
    """
    temp_file = './new_xml.xml'
    tree = ET.parse(xml_file)
    root = tree.getroot()
    layouts = root.findall("./layoutList/layout")
    for layout in layouts:
        name = layout.find('configItem/name')
        if name.text == LANGUAGE:
            variantList = layout.find('variantList')
            sublayouts = variantList.findall('variant/configItem/name')
            # Only add the variant if it is not already registered.
            if not list(filter(lambda x: x.text == NAME, sublayouts)):
                variant = ET.SubElement(variantList, 'variant')
                configItem = ET.SubElement(variant, 'configItem')
                name = ET.SubElement(configItem, 'name')
                name.text = NAME
                description = ET.SubElement(configItem, 'description')
                description.text = DESCRIPTION
    # Capture the two-line prolog so it can be restored after serialization.
    with open(xml_file) as f:
        header = f.readline() + f.readline()
    # Serialize the tree, then prepend the preserved header in a separate,
    # explicit read/rewrite step.  (The previous version read the file back
    # through a stale 'w+' handle after ET had rewritten it underneath --
    # fragile and dependent on buffering behaviour.)
    tree.write(temp_file, encoding="utf8", xml_declaration=False)
    with open(temp_file) as fin:
        content = fin.read() + "\n"
    with open(temp_file, 'w') as fout:
        fout.write(header + content)
    shutil.move(xml_file, xml_file + '.bak')
    shutil.move(temp_file, xml_file)
if __name__ == '__main__':
    # Install the layout into the system XKB database: symbols first, then
    # register the variant in both rules databases (base and evdev).
    install_symbols()
    install_lst('/usr/share/X11/xkb/rules/base.lst')
    install_lst('/usr/share/X11/xkb/rules/evdev.lst')
    install_xml('/usr/share/X11/xkb/rules/base.xml')
    install_xml('/usr/share/X11/xkb/rules/evdev.xml')
| luxcem/programmer-beop | install.py | Python | gpl-3.0 | 3,457 |
"""
Test for allantools (https://github.com/aewallin/allantools)
Stable32 was used to calculate the deviations we compare against.
AW2015-06-26
The dataset is from the 10 MHz output at the back of an HP Impedance Analyzer
measured with Keysight 53230A counter, 1.0s gate, RCON mode, with H-maser 10MHz reference
"""
import os
import testutils
import allantools as allan
import math
import sys
import pytest
import numpy as np
sys.path.append("..")
sys.path.append("../..") # hack to import from parent directory
# remove if you have allantools installed in your python path
data_file = 'ocxo_frequency.txt'
def change_to_test_dir():
    """Switch the working directory to this script's own directory.

    The test data files are referenced by relative path, so tests must run
    from the directory that contains this module.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_dir)
# Module-wide test settings shared by all TestOCXO methods.
verbose = 1
tolerance = 1e-4  # relative tolerance
rate = 1/float(1.0)  # stable32 runs were done with this data-interval
class TestOCXO():
    """Compare allantools deviations and confidence intervals against
    Stable32 reference results for the OCXO frequency dataset.

    Each test reads a Stable32 result file (one row per averaging factor),
    recomputes the corresponding deviation with allantools, and checks the
    value and its confidence interval against the Stable32 numbers.
    """

    def test_adev_ci(self):
        """ ADEV with confidence intervals, fixed alpha (no noise-ID) """
        change_to_test_dir()
        s32rows = testutils.read_stable32(
            resultfile='adev_octave.txt', datarate=1.0)
        for row in s32rows:
            # Convert raw counter readings to fractional frequency.
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.adev(data, rate=rate, data_type="freq",
                                                taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            edf = allan.edf_greenhall(alpha=row['alpha'], d=2, m=row['m'], N=len(
                data), overlapping=False, modified=False, verbose=True)
            print("alpha=", row['alpha'])
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print(" n check: %d" % testutils.check_equal(ns[0], row['n']))
            print(" dev check: %d" % testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-4))
            print("min dev check: %.4g %.4g %d" % (
                lo, row['dev_min'], testutils.check_approx_equal(lo, row['dev_min'], tolerance=1e-3)))
            print("max dev check: %.4g %.4g %d" % (
                hi, row['dev_max'], testutils.check_approx_equal(hi, row['dev_max'], tolerance=1e-3)))

    def test_adev_ci_and_noiseID(self):
        """ ADEV with confidence intervals, including noise-ID """
        change_to_test_dir()
        s32rows = testutils.read_stable32(
            resultfile='adev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.adev(data, rate=rate, data_type="freq",
                                                taus=[row['tau']])
            dev = devs[0]
            try:
                # CI including noise-ID
                (lo2, hi2) = allan.confidence_interval_noiseID(
                    data, dev, af=int(row['m']), dev_type="adev", data_type="freq")
                assert np.isclose(lo2, row['dev_min'], rtol=1e-2)
                assert np.isclose(hi2, row['dev_max'], rtol=1e-2)
                print(" CI OK! tau= %.0f lo/s32_lo = %f hi/s32_hi = %f " %
                      (row['tau'], lo2/row['dev_min'], hi2/row['dev_max']))
            except NotImplementedError:
                # noise-ID is not possible for every averaging factor; skip those.
                print("can't do CI for tau= %f" % row['tau'])
                pass

    def test_oadev_ci(self):
        """ Overlapping ADEV with confidence intervals """
        s32rows = testutils.read_stable32(
            resultfile='oadev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.oadev(data, rate=rate, data_type="freq",
                                                 taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            edf = allan.edf_greenhall(alpha=row['alpha'], d=2, m=row['m'], N=len(
                data), overlapping=True, modified=False, verbose=True)
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", devs[0], row['dev'], testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: ", lo, row['dev_min'], testutils.check_approx_equal(
                lo, row['dev_min'], tolerance=2e-3))
            print("max dev check: ", hi, row['dev_max'], testutils.check_approx_equal(
                hi, row['dev_max'], tolerance=2e-3))

    def test_mdev_ci(self):
        """ Modified ADEV with confidence intervals """
        s32rows = testutils.read_stable32(
            resultfile='mdev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.mdev(data, rate=rate, data_type="freq",
                                                taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            edf = allan.edf_greenhall(alpha=row['alpha'], d=2, m=row['m'], N=len(
                data), overlapping=True, modified=True, verbose=True)
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", devs[0], row['dev'], testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: ", lo, row['dev_min'], testutils.check_approx_equal(
                lo, row['dev_min'], tolerance=2e-3))
            print("max dev check: ", hi, row['dev_max'], testutils.check_approx_equal(
                hi, row['dev_max'], tolerance=2e-3))

    def test_tdev_ci(self):
        """ Time Deviation with confidence intervals """
        s32rows = testutils.read_stable32(
            resultfile='tdev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.tdev(data, rate=rate, data_type="freq",
                                                taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            # TDEV is a scaled MDEV, hence modified=True below.
            edf = allan.edf_greenhall(alpha=row['alpha'], d=2, m=row['m'], N=len(
                data), overlapping=True, modified=True, verbose=True)
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", devs[0], row['dev'], testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: ", lo, row['dev_min'], testutils.check_approx_equal(
                lo, row['dev_min'], tolerance=2e-3))
            print("max dev check: ", hi, row['dev_max'], testutils.check_approx_equal(
                hi, row['dev_max'], tolerance=2e-3))

    def test_hdev_ci(self):
        """ Hadamard with confidence intervals """
        s32rows = testutils.read_stable32(
            resultfile='hdev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.hdev(data, rate=rate, data_type="freq",
                                                taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            # Hadamard uses third differences, hence d=3.
            edf = allan.edf_greenhall(alpha=row['alpha'], d=3, m=row['m'], N=len(
                data), overlapping=False, modified=False, verbose=True)
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", devs[0], row['dev'], testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: ", lo, row['dev_min'], testutils.check_approx_equal(
                lo, row['dev_min'], tolerance=2e-3))
            print("max dev check: ", hi, row['dev_max'], testutils.check_approx_equal(
                hi, row['dev_max'], tolerance=2e-3))

    def test_ohdev_ci(self):
        """ Overlapping Hadamard deviation with confidence intervals """
        s32rows = testutils.read_stable32(
            resultfile='ohdev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.ohdev(data, rate=rate, data_type="freq",
                                                 taus=[row['tau']])
            # NOTE! Here we use alhpa from Stable32-results for the allantools edf computation!
            edf = allan.edf_greenhall(alpha=row['alpha'], d=3, m=row['m'], N=len(
                data), overlapping=True, modified=False, verbose=True)
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", devs[0], row['dev'], testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: ", lo, row['dev_min'], testutils.check_approx_equal(
                lo, row['dev_min'], tolerance=2e-3))
            print("max dev check: ", hi, row['dev_max'], testutils.check_approx_equal(
                hi, row['dev_max'], tolerance=5e-3))

    # fails
    # totdev() needs bias-correction, depending on alpha(?)
    @pytest.mark.skip(reason="needs bias-correction and noise-ID to work")
    @pytest.mark.xfail
    def test_totdev_ci(self):
        # Known-failing: kept for reference until totdev bias-correction lands.
        print("totdev()")
        s32rows = testutils.read_stable32(
            resultfile='totdev_octave.txt', datarate=1.0)
        for row in s32rows:
            data = testutils.read_datafile(data_file)
            data = allan.frequency2fractional(data, mean_frequency=1.0e7)
            (taus, devs, errs, ns) = allan.totdev(data, rate=rate, data_type="freq",
                                                  taus=[row['tau']])
            edf = allan.edf_totdev(N=len(data), m=row['m'], alpha=row['alpha'])
            (lo, hi) = allan.confidence_interval(devs[0], edf=edf)
            print("n check: ", testutils.check_equal(ns[0], row['n']))
            print("dev check: ", testutils.check_approx_equal(
                devs[0], row['dev'], tolerance=2e-3))
            print("min dev check: %.4g %.4g %d" % (
                lo, row['dev_min'], testutils.check_approx_equal(lo, row['dev_min'], tolerance=2e-3)))
            print("max dev check: %.4g %.4g %d" % (
                hi, row['dev_max'], testutils.check_approx_equal(hi, row['dev_max'], tolerance=2e-3)))

    def test_noise_id(self):
        """ test for noise-identification """
        s32_rows = testutils.read_stable32('mdev_octave.txt', rate)
        freq = testutils.read_datafile(data_file)
        y_freq = allan.frequency2fractional(freq, mean_frequency=1.0e7)
        phase = allan.frequency2phase(freq, rate)
        for s32 in s32_rows:
            s32_tau, s32_alpha, s32_AF = s32['tau'], s32['alpha'], int(
                s32['m'])
            # noise-ID from frequency
            # Only attempt identification when enough data points remain
            # at this averaging factor (>20 per the check below).
            if len(phase)/s32_AF > 20:
                alpha_int, alpha, d, rho = allan.autocorr_noise_id(
                    freq, data_type='freq', af=s32_AF)
                print("y: ", s32_tau, s32_alpha, alpha_int, alpha, rho, d)
                assert alpha_int == s32_alpha
            # noise-ID from phase
            if len(phase)/s32_AF > 20:
                alpha_int, alpha, d, rho = allan.autocorr_noise_id(
                    phase, data_type='phase', af=s32_AF)
                print("x: ", s32_tau, s32_alpha, alpha_int, alpha, rho, d)
                assert alpha_int == s32_alpha
if __name__ == "__main__":
    # pytest.main()
    # Ad-hoc manual run: only the noise-identification test is executed;
    # the remaining tests are parked in the string literal below.
    t = TestOCXO()
    # t.test_adev_ci_and_noiseID()
    t.test_noise_id()
    """
    t.test_adev_ci()
    t.test_oadev_ci()
    t.test_mdev_ci()
    t.test_tdev_ci()
    t.test_hdev_ci()
    t.test_ohdev_ci()
    """
    # t.test_totdev_ci()
| aewallin/allantools | tests/ocxo/test_ocxo.py | Python | lgpl-3.0 | 12,692 |
#!/usr/bin/env python
from collections import deque
import rospy
import numpy
import tf
import actionlib
import yaml
from math import sqrt, cos
from collections import *
from sklearn.cluster import KMeans
from std_msgs.msg import *
from geometry_msgs.msg import *
from move_base_msgs.msg import *
from nav_msgs.msg import OccupancyGrid, Odometry, MapMetaData
class PathPlanner:
    """Plans and dispatches move_base goals that sweep a robot over
    contaminated regions of an occupancy-grid map.

    Contaminated free cells are clustered with KMeans (roughly one cluster
    per square metre), clusters are visited in nearest-neighbour order from
    the robot's current pose, and each cluster is covered with a
    back-and-forth (boustrophedon) sweep of goals.
    """

    # Quaternions for the four cardinal headings (rotation about z).
    UP = Quaternion(0, 0, 0.7071, 0.7071)
    DOWN = Quaternion(0, 0, -0.7071, 0.7071)
    LEFT = Quaternion(0, 0, 1, 0)
    RIGHT = Quaternion(0, 0, 0, 1)

    def __init__(self):
        self.goals = deque()      # queue of pending MoveBaseGoal messages
        self.offset = (0, 0)      # map origin in world coordinates
        self.resolution = 0       # metres per occupancy-grid cell
        self.map = None           # merged static map / costmap values
        self.listener = None      # NOTE(review): never assigned elsewhere in view
        self.client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
        self.seq = -1             # header sequence number for published goals
        self.step = 0.2           # sweep row spacing (metres)

    def extract_points(self, ogrid):
        # Collect (x, y) points (map-scaled, not offset) for every cell that
        # is contaminated in `ogrid` but free in the merged static map.
        points = []
        #print numpy.asarray(ogrid.data)
        for i in xrange(len(ogrid.data)):
            #if there is contamination in an empty space, add point to list
            if ogrid.data[i] > 0 and self.map[i] == 0:
                newp = [j*ogrid.info.resolution for j in divmod(i, ogrid.info.width)]
                newp.reverse()  # divmod gives (row, col); reverse to (x, y)
                points.append(newp)
        return numpy.asarray(points)

    def bounding_box(self, points):
        # Axis-aligned bounding box of `points`, shifted into world
        # coordinates by the map origin offset.
        # Returns [min_x, min_y, max_x, max_y, center].
        min_x, min_y = numpy.min(points, axis=0)
        max_x, max_y = numpy.max(points, axis=0)
        min_x += self.offset[0]
        max_x += self.offset[0]
        min_y += self.offset[1]
        max_y += self.offset[1]
        center = ((min_x + max_x)/2, (min_y+max_y)/2)
        #print min_x, min_y, max_x, max_y, center
        return [min_x, min_y, max_x, max_y, center]

    def _set_map(self, new_map, costmap):
        # Merge the static map with the global costmap: prefer the costmap
        # value where it marks significant cost (>= 40) over known space.
        #0.177 = robot radius - parameterize so it can source from same place as cleanerbot?
        self.offset = (new_map.info.origin.position.x, new_map.info.origin.position.y)
        self.resolution = new_map.info.resolution
        m = []
        for i in xrange(len(new_map.data)):
            if new_map.data[i] > -1 and new_map.data[i] < costmap.data[i] and costmap.data[i] >= 40: m.append(costmap.data[i])
            else: m.append(new_map.data[i])
        self.map = m

    def _set_goal(self, header, x, y, orientation):
        # Wrap a pose at (x, y) with the given orientation in a MoveBaseGoal.
        pose = PoseStamped(header, Pose(Point(x, y, 0), orientation))
        goal = MoveBaseGoal()
        goal.target_pose = pose
        return goal

    def _set_cluster_goals(self, points, data, corner):
        # Build a boustrophedon sweep of goals over one cluster, starting
        # from the given corner (e.g. "top left").
        #data format: (min_x, min_y, max_x, max_y, center)
        #print data
        vert, horiz = corner.split()
        g = []
        if vert == 'top': #start from top
            goal_range = numpy.arange(data[3], data[1], 0-self.step)
        elif vert == 'bottom': #start from bottom
            goal_range = numpy.arange(data[1], data[3], self.step)
        # NOTE(review): `goal_range` is undefined if `vert` is neither
        # 'top' nor 'bottom' -- callers currently only pass those values.
        for y in goal_range:
            # Take the horizontal slice of cluster points near this row.
            point_slice = []
            for p in points:
                if abs(y-p[1]) <= self.step: point_slice.append(p)
            try:
                min_x, min_y = numpy.min(point_slice, axis=0)
                max_x, max_y = numpy.max(point_slice, axis=0)
            except ValueError:
                # empty slice: no contaminated points in this row
                continue
            header = Header(self.seq, rospy.Time.now(), '/map')
            # Two goals per row: traverse the row in the sweep direction.
            if horiz == 'left':
                g.append(self._set_goal(header, min_x, y, self.RIGHT))
                g.append(self._set_goal(header, max_x, y, self.LEFT))
            elif horiz == 'right': #start from right
                g.append(self._set_goal(header, max_x, y, self.LEFT))
                g.append(self._set_goal(header, min_x, y, self.RIGHT))
        return g

    def _dist(self, a, b):
        # Euclidean distance between two (x, y) tuples.
        #print a, b
        return sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)

    #pass it points in order: brute force (recursive?) nearest-neighbor
    #point format: [(point_id, (x, y))]
    def _nearest_neighbor(self, base_point, points):
        # Greedy nearest-neighbour ordering of cluster centres, recursive.
        # Returns [(cluster_id, approach_corner), ...]; `points` is consumed.
        if len(points) == 0:
            return []
        else:
            temp = numpy.inf
            index = -1
            for p in xrange(len(points)):
                dist = self._dist(base_point[1], points[p][1])
                #print 'robot_loc: {0} >> cluster{1} loc: {2}: {3}'.format(base_point[1], p, points[p][1], dist)
                if dist < temp:
                    temp = dist
                    index = p
            next_point = points.pop(index)
            # Pick the cluster corner nearest the direction of approach.
            direction = None
            if base_point[1][0] < next_point[1][0]:
                if base_point[1][1] < next_point[1][1]:
                    direction = "bottom left"
                else: direction = "top left"
            elif base_point[1][1] < next_point[1][1]: direction = "bottom right"
            else: direction = "top right"
            x = [(next_point[0], direction)]
            x.extend(self._nearest_neighbor(next_point, points))
            return x

    def _get_order(self, centers):
        #determine order of clusters to publish points for
        #centers should be a list of (x, y) tuples
        # Blocks until an AMCL pose estimate is available.
        robot = rospy.wait_for_message("amcl_pose", PoseWithCovarianceStamped)
        robot_loc = robot.pose.pose.position
        points = zip(range(len(centers)), centers)
        #print points
        path = self._nearest_neighbor((None, (robot_loc.x, robot_loc.y)), points)
        #print path
        #point_order, directions = zip(*path)
        return path

    def set_path(self, ogrid):
        #group contam_points into boxes, prioritizing less dirty areas first
        points = self.extract_points(ogrid)
        if points.size==0:
            rospy.signal_shutdown("Cleaning complete. Program will now exit.")
            return
        else:
            # points.size counts both coordinates, hence the division by 2;
            # one cluster per ~1 square metre.
            n_clusters = int(round(points.size/2*(self.resolution**2))) #1 cluster ~= 1 sq. meter
            print n_clusters
            cluster_data = []
            if n_clusters >= 1:
                kmeans = KMeans(n_clusters).fit(points)
                for n in xrange(n_clusters):
                    xy = points[kmeans.labels_==n]
                    #print "point {0}".format(n)
                    cluster_data.append(self.bounding_box(xy))
                # Order clusters by nearest-neighbour from the robot pose,
                # then enqueue the sweep for the first cluster only.
                order = self._get_order([x[4] for x in cluster_data])
                self.seq += 1
                self.goals.extend(self._set_cluster_goals(points[kmeans.labels_==order[0][0]].tolist(), cluster_data[order[0][0]], order[0][1]))
                # for o in order:
                #     self.goals.extend(self._set_cluster_goals(cluster_data[o[0]], o[1]))
                #     self.seq += 1
                # elif n_clusters == 0 and -1 in db.labels_:
                #     xy = points[db.labels_== -1]
                #     for point in xy:
                #         header = Header(self.seq, rospy.Time.now(), '/map')
                #         self.seq += 1
                #         pose = PoseStamped(header, Pose(Point(point[0], point[1], 0), RIGHT))
                #         goal = MoveBaseGoal()
                #         goal.target_pose = pose
                #         self.goals.append(goal)
            else:
                rospy.signal_shutdown("Cleaning complete. Program will now exit.")

    #send next goal if previous one has completed/terminated
    def send_goal(self):
        self.client.wait_for_server()
        goal = self.goals.popleft()
        try:
            print "Sending Goal"
            self.client.send_goal(goal)
        except IndexError:
            # NOTE(review): popleft() above raises IndexError *before* this
            # try block, so this handler is effectively unreachable -- confirm.
            print "Oops! No more goals!"
            return
        self.client.wait_for_result()
        state = self.client.get_state()
        count = 0
        # State 4 == ABORTED; nudge the goal along its travel axis and retry.
        while state == 4 and count < 3: #if server rejects goal, try another goal a little closer
            print "Goal failed. Trying again."
            pose = goal.target_pose.pose
            if pose.orientation == self.LEFT: pose.position.x -= self.step
            elif pose.orientation == self.RIGHT: pose.position.x += self.step
            goal.target_pose.pose = pose
            self.client.send_goal(goal)
            self.client.wait_for_result()
            state = self.client.get_state()
            count+=1

    def setup(self):
        # Node entry point: build the merged map, then loop at 5 Hz sending
        # queued goals or regenerating the path from the contamination grid.
        rospy.init_node('path_planner', anonymous=True)
        base_map = rospy.wait_for_message("map", OccupancyGrid)
        costmap = rospy.wait_for_message("/move_base/global_costmap/costmap", OccupancyGrid)
        self._set_map(base_map,costmap)
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            if self.goals: self.send_goal() #if there is a goal, send something
            else:
                msg = rospy.wait_for_message("contamination_grid", OccupancyGrid)
                self.set_path(msg)
            #print "Looping"
            r.sleep()
if __name__ == '__main__':
    # Run the planner node until ROS shuts it down.
    node = PathPlanner()
    try:
        node.setup()
    except rospy.ROSInterruptException:
        # normal shutdown path (Ctrl-C / node kill)
        pass
"""
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from contentstore.utils import reverse_course_url
class TestOrphan(CourseTestCase):
    """
    Test finding orphans via view and django config
    """
    def setUp(self):
        # Build a small course tree containing three deliberately orphaned
        # modules (chapter, vertical, html) plus detached-but-allowed items
        # (static_tab, about, course_info) that must NOT be reported.
        super(TestOrphan, self).setUp()
        runtime = self.course.runtime
        self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', self.course.location.name, runtime)
        self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', self.course.location.name, runtime)
        self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None, runtime)
        self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', runtime)
        self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None, runtime)
        self._create_item('html', 'Html1', "<p>Goodbye</p>", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', runtime)
        self._create_item('html', 'OrphanHtml', "<p>Hello</p>", {'display_name': 'Orphan html'}, None, None, runtime)
        self._create_item('static_tab', 'staticuno', "<p>tab</p>", {'display_name': 'Tab uno'}, None, None, runtime)
        self._create_item('about', 'overview', "<p>overview</p>", {}, None, None, runtime)
        self._create_item('course_info', 'updates', "<ol><li><h2>Sep 22</h2><p>test</p></li></ol>", {}, None, None, runtime)
        self.orphan_url = reverse_course_url('orphan_handler', self.course.id)

    def _create_item(self, category, name, data, metadata, parent_category, parent_name, runtime):
        # Create a module; when a parent is given, also register the child
        # in the parent's `children` list (otherwise it is an orphan).
        location = self.course.location.replace(category=category, name=name)
        store = modulestore()
        store.create_and_save_xmodule(
            location, self.user.id, definition_data=data, metadata=metadata, runtime=runtime
        )
        if parent_name:
            # add child to parent in mongo
            parent_location = self.course.location.replace(category=parent_category, name=parent_name)
            parent = store.get_item(parent_location)
            parent.children.append(location)
            store.update_item(parent, self.user.id)

    def test_mongo_orphan(self):
        """
        Test that old mongo finds the orphans
        """
        orphans = json.loads(
            self.client.get(
                self.orphan_url,
                HTTP_ACCEPT='application/json'
            ).content
        )
        # Exactly the three orphaned modules from setUp, nothing else.
        self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
        location = self.course.location.replace(category='chapter', name='OrphanChapter')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='vertical', name='OrphanVert')
        self.assertIn(location.to_deprecated_string(), orphans)
        location = self.course.location.replace(category='html', name='OrphanHtml')
        self.assertIn(location.to_deprecated_string(), orphans)

    def test_mongo_orphan_delete(self):
        """
        Test that old mongo deletes the orphans
        """
        self.client.delete(self.orphan_url)
        orphans = json.loads(
            self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
        )
        self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))

    def test_not_permitted(self):
        """
        Test that auth restricts get and delete appropriately
        """
        # A non-staff enrolled user must get 403 on both GET and DELETE.
        test_user_client, test_user = self.create_non_staff_authed_user_client()
        CourseEnrollment.enroll(test_user, self.course.id)
        response = test_user_client.get(self.orphan_url)
        self.assertEqual(response.status_code, 403)
        response = test_user_client.delete(self.orphan_url)
        self.assertEqual(response.status_code, 403)
| wwj718/murp-edx | cms/djangoapps/contentstore/tests/test_orphan.py | Python | agpl-3.0 | 4,054 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#https://www.kaggle.com/dansbecker/handling-missing-values
# one tree
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
#from sklearn.model_selection import score_dataset
from sklearn.preprocessing import Imputer
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.externals import joblib
# input data
# Load the Kaggle house-prices training and test sets.
melbourne_data = pd.read_csv('train.csv')#train.csv
test_data = pd.read_csv('test.csv')#train.csv
new_data =melbourne_data.copy()
# data describe
#print (melbourne_data.describe())
#print (melbourne_data.columns)
#print (melbourne_price_data.head())
#print (melbourne_data.isnull().sum())
#print (X.head())
# Drop rows with no target, then split off the target column.
melbourne_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = melbourne_data.SalePrice
# Drop Id, the target, and every column containing missing values.
cols_with_missing = [col for col in melbourne_data.columns
                     if melbourne_data[col].isnull().any()]
X = melbourne_data.drop(['Id', 'SalePrice'] + cols_with_missing, axis=1)
#X = X.select_dtypes(exclude=['object'])
# For the sake of keeping the example simple, we'll use only numeric predictors.
# Keep numeric columns plus low-cardinality categoricals (suitable for
# one-hot encoding without exploding the feature count).
low_cardinality_cols = [cname for cname in X.columns if
                        X[cname].nunique() < 10 and
                        X[cname].dtype == "object"]
numeric_cols = [cname for cname in X.columns if
                X[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numeric_cols
train_predictors = X[my_cols]
#print train_predictors.dtypes.sample(10)
# split data into train and validation
# how to know test_size and random_state?
X_train,X_test,y_train,y_test = train_test_split(train_predictors,y,test_size=0.25,random_state = 0)
def get_mae(X_mea, y_mea):
    """Cross-validated mean absolute error for a 50-tree random forest.

    sklearn's scorer returns the *negative* MAE by convention, so the mean
    score is negated to report a positive error value.
    """
    scores = cross_val_score(RandomForestRegressor(50), X_mea, y_mea,
                             scoring='neg_mean_absolute_error')
    return -scores.mean()
# A Drop Columns with Missing Values
# Compare two treatments of the data: dropping categoricals outright
# versus one-hot encoding the low-cardinality categorical columns.
#data_without_missing_values = melbourne_data.dropna(axis=1)
predictors_without_categoricals = X_train.select_dtypes(exclude=['object'])
mae_without_categoricals = get_mae(predictors_without_categoricals, y_train)
print('Mean Absolute Error when Dropping Categoricals: ' + str(int(mae_without_categoricals)))
one_hot_encoded_training_predictors = pd.get_dummies(X_train)
mae_one_hot_encoded = get_mae(one_hot_encoded_training_predictors, y_train)
# fixed typo in the printed label: "Abslute" -> "Absolute"
print('Mean Absolute Error with One-Hot Encoding: ' + str(int(mae_one_hot_encoded)))
# find max_leaf_nodes, then get 400
'''
def getmea(max_leaf_nodes,mea_train_x,mea_test_x,mea_train_y,mea_test_y):
model = DecisionTreeRegressor(max_leaf_nodes = max_leaf_nodes,random_state = 0)
model.fit(mea_train_x,mea_train_y)
predicted_test = model.predict(mea_test_x)
return mean_absolute_error(mea_test_y,predicted_test)
for max_leaf_nodes in [300,350,400,450,500,550,600,650,700,750]:
mea = getmea(max_leaf_nodes,train_x,val_x,train_y,val_y)
print("Max_leaf_nodes: %d ,mea: %d" %(max_leaf_nodes,mea))
'''
# model and train
#melbourne_model = DecisionTreeRegressor(max_leaf_nodes = 400,random_state = 0)
#melbourne_model.fit(train_x,train_y)
# predict and save output
'''
#print ("Making predictions for the following 5 houses")
#print (val_x.head())
#print ("The predictions are")
predicted_test_prices = forest_model.predict(test[melbourne_predictors])
#print (predicted_home_prices)
my_submission = pd.DataFrame({'Id':test.Id,'SalePrice':predicted_test_prices})
my_submission.to_csv('submission.csv',index = False,header = False,columns = ['Id','SalePrice'])
my_submission.to_csv('result.txt',index=False,header=False,sep='\t')
'''
# validation
#predicted_home_prices = melbourne_model.predict(val_x)
#print mean_absolute_error(val_y,predicted_home_prices)
#save model
#joblib.dump(melbourne_model,'model.pickle')
#load model
#model = joblib.load('model.pickle')
| muxiaobai/CourseExercises | python/kaggle/learn2/one-hot.py | Python | gpl-2.0 | 4,135 |
"""
This package contains tests for the credit mining-related code of Tribler.
"""
| vandenheuvel/tribler | Tribler/Test/Core/CreditMining/__init__.py | Python | lgpl-3.0 | 83 |
# Copyright 2013 Douglas Linder
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import cocos
import nwidget
import random
import model
import math
# Game control constants
SNAKE_SPEED = 5
GROW_RATE = 25
MARKER_EAT_DIST = 10
MARKER_SCORE = 10000
class GameView(cocos.layer.Layer):
""" Testing class """
    def __init__(self, assets):
        """Build the game layer: create assets/actors and start the tick.

        Schedules :meth:`update` every 0.03 s (~33 fps).
        """
        super(GameView, self).__init__()
        # NOTE(review): the `assets` argument is ignored and a fresh asset
        # registry is created instead -- confirm this is intentional.
        self.assets = nwidget.Assets()
        self.load_objects()
        pyglet.clock.schedule_interval(self.update, 0.03)
    def load_objects(self):
        """Create the UI, background, snake, path and first marker, and
        initialise all per-round game state (direction, speed, timers)."""
        assets = self.assets
        # Clear events from other views
        nwidget.events.clear(cocos.director.director.window)
        self.is_event_handler = True
        # View model & ui -- shared dict read by the UI layer; "updated"
        # flags that the UI should refresh.
        self.model = {
            "score" : 0,
            "dead" : False,
            "updated" : False,
            "play_time" : 0,
        }
        self.ui = model.Ui("game.py", self.model)
        self.add(self.ui, z=1) # Above the background
        # bind events
        nwidget.listen("GAME_RESTART", self.on_restart)
        nwidget.listen("GAME_GOTO_MENU", self.on_menu)
        # Background
        bg = model.Background(assets)
        self.add(bg.node)
        # Add path and snake, both starting at the window centre.
        self.snake = model.Snake(assets)
        self.path = model.Path()
        width, height = cocos.director.director.get_window_size()
        x = width / 2
        y = height / 2
        self.snake.jump(x, y)
        self.path.jump(x, y)
        self.add(self.snake.node)
        self.add(self.path)
        self.bounds = (0, 0, width, height)
        # Direction
        self.snake.right()
        self.vector = "RIGHT"
        self.speed = SNAKE_SPEED
        # Are we paused because we died?
        self.dead = False
        # Start~
        self.marker = None
        self.inc_dt = 0
        self.generate_marker()
def update(self, dt):
if not self.dead:
# Update timer
self.inc_dt += dt
if self.inc_dt > 1:
self.inc_dt = 0
self.model["play_time"] += 1
self.model["updated"] = True
motion = self.snake.move(self.speed, self.vector)
self.path.move(*motion)
if self.check_snake_dies():
self.on_died()
if self.check_snake_eats_marker():
self.on_marker()
self.generate_marker()
self.model["score"] += random.randint(MARKER_SCORE, MARKER_SCORE * 10)
self.model["updated"] = True
def check_snake_dies(self):
if self.snake.x < self.bounds[0]:
return True
elif self.snake.y < self.bounds[1]:
return True
elif self.snake.x > self.bounds[2]:
return True
elif self.snake.y > self.bounds[3]:
return True
elif self.path.intersects():
return True
return False
def check_snake_eats_marker(self):
dx = self.snake.x - self.marker.node.position[0]
dy = self.snake.y - self.marker.node.position[1]
d = math.sqrt(dx*dx + dy*dy)
if d < MARKER_EAT_DIST:
return True
return False
def generate_marker(self):
if self.marker is None:
self.marker = model.Marker(self.assets)
self.marker.node.scale = 0.4
self.marker.node.position = (
random.randint(self.bounds[0] + 40, self.bounds[2] - 40),
random.randint(self.bounds[1] + 40, self.bounds[3] - 40)
)
self.add(self.marker.node)
def on_restart(self, code, widget):
self.model["score"] = 0
self.model["dead"] = False
self.model["updated"] = True
self.model["play_time"] = 0
width, height = cocos.director.director.get_window_size()
x = width / 2
y = height / 2
self.path.reset()
self.snake.jump(x, y)
self.path.jump(x, y)
# Direction
self.snake.right()
self.vector = "RIGHT"
self.speed = SNAKE_SPEED
# Are we paused because we died?
self.dead = False
# Start~
self.inc_dt = 0
self.generate_marker()
def on_menu(self, code, widget):
model.Game.menu()
def on_marker(self):
self.path.length += GROW_RATE
self.remove(self.marker.node)
self.marker = None
self.model["score"] += 100
def on_died(self):
self.dead = True
self.model["dead"] = True # Toggles ui state!
self.model["updated"] = True # Toggles ui state!
def on_left(self):
if self.vector != "RIGHT":
self.vector = "LEFT"
self.snake.left()
def on_right(self):
if self.vector != "LEFT":
self.vector = "RIGHT"
self.snake.right()
def on_up(self):
if self.vector != "DOWN":
self.vector = "UP"
self.snake.up()
def on_down(self):
if self.vector != "UP":
self.vector = "DOWN"
self.snake.down()
def on_key_press(self, key, modifiers):
if key == pyglet.window.key.UP:
self.on_up()
elif key == pyglet.window.key.DOWN:
self.on_down()
elif key == pyglet.window.key.LEFT:
self.on_left()
elif key == pyglet.window.key.RIGHT:
self.on_right()
| shadowmint/nwidget | samples/snake/views/game_view.py | Python | apache-2.0 | 5,269 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import NetworkManagementClientConfiguration
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import AvailableDelegationsOperations
from .operations import AvailableResourceGroupDelegationsOperations
from .operations import AzureFirewallsOperations
from .operations import AzureFirewallFqdnTagsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import ExpressRouteGatewaysOperations
from .operations import ExpressRouteConnectionsOperations
from .operations import ExpressRoutePortsLocationsOperations
from .operations import ExpressRoutePortsOperations
from .operations import ExpressRouteLinksOperations
from .operations import InterfaceEndpointsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerOutboundRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkInterfaceTapConfigurationsOperations
from .operations import NetworkProfilesOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import Operations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import P2SVpnServerConfigurationsOperations
from .operations import P2SVpnGatewaysOperations
from . import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2018_08_01.operations.ApplicationGatewaysOperations
:ivar application_security_groups: ApplicationSecurityGroupsOperations operations
:vartype application_security_groups: azure.mgmt.network.v2018_08_01.operations.ApplicationSecurityGroupsOperations
:ivar available_delegations: AvailableDelegationsOperations operations
:vartype available_delegations: azure.mgmt.network.v2018_08_01.operations.AvailableDelegationsOperations
:ivar available_resource_group_delegations: AvailableResourceGroupDelegationsOperations operations
:vartype available_resource_group_delegations: azure.mgmt.network.v2018_08_01.operations.AvailableResourceGroupDelegationsOperations
:ivar azure_firewalls: AzureFirewallsOperations operations
:vartype azure_firewalls: azure.mgmt.network.v2018_08_01.operations.AzureFirewallsOperations
:ivar azure_firewall_fqdn_tags: AzureFirewallFqdnTagsOperations operations
:vartype azure_firewall_fqdn_tags: azure.mgmt.network.v2018_08_01.operations.AzureFirewallFqdnTagsOperations
:ivar ddos_protection_plans: DdosProtectionPlansOperations operations
:vartype ddos_protection_plans: azure.mgmt.network.v2018_08_01.operations.DdosProtectionPlansOperations
:ivar available_endpoint_services: AvailableEndpointServicesOperations operations
:vartype available_endpoint_services: azure.mgmt.network.v2018_08_01.operations.AvailableEndpointServicesOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
:vartype express_route_circuit_connections: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCircuitConnectionsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2018_08_01.operations.ExpressRouteServiceProvidersOperations
:ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
:vartype express_route_cross_connections: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCrossConnectionsOperations
:ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
:vartype express_route_cross_connection_peerings: azure.mgmt.network.v2018_08_01.operations.ExpressRouteCrossConnectionPeeringsOperations
:ivar express_route_gateways: ExpressRouteGatewaysOperations operations
:vartype express_route_gateways: azure.mgmt.network.v2018_08_01.operations.ExpressRouteGatewaysOperations
:ivar express_route_connections: ExpressRouteConnectionsOperations operations
:vartype express_route_connections: azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations
:ivar express_route_ports_locations: ExpressRoutePortsLocationsOperations operations
:vartype express_route_ports_locations: azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsLocationsOperations
:ivar express_route_ports: ExpressRoutePortsOperations operations
:vartype express_route_ports: azure.mgmt.network.v2018_08_01.operations.ExpressRoutePortsOperations
:ivar express_route_links: ExpressRouteLinksOperations operations
:vartype express_route_links: azure.mgmt.network.v2018_08_01.operations.ExpressRouteLinksOperations
:ivar interface_endpoints: InterfaceEndpointsOperations operations
:vartype interface_endpoints: azure.mgmt.network.v2018_08_01.operations.InterfaceEndpointsOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2018_08_01.operations.LoadBalancersOperations
:ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
:vartype load_balancer_backend_address_pools: azure.mgmt.network.v2018_08_01.operations.LoadBalancerBackendAddressPoolsOperations
:ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
:vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2018_08_01.operations.LoadBalancerFrontendIPConfigurationsOperations
:ivar inbound_nat_rules: InboundNatRulesOperations operations
:vartype inbound_nat_rules: azure.mgmt.network.v2018_08_01.operations.InboundNatRulesOperations
:ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
:vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2018_08_01.operations.LoadBalancerLoadBalancingRulesOperations
:ivar load_balancer_outbound_rules: LoadBalancerOutboundRulesOperations operations
:vartype load_balancer_outbound_rules: azure.mgmt.network.v2018_08_01.operations.LoadBalancerOutboundRulesOperations
:ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
:vartype load_balancer_network_interfaces: azure.mgmt.network.v2018_08_01.operations.LoadBalancerNetworkInterfacesOperations
:ivar load_balancer_probes: LoadBalancerProbesOperations operations
:vartype load_balancer_probes: azure.mgmt.network.v2018_08_01.operations.LoadBalancerProbesOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2018_08_01.operations.NetworkInterfacesOperations
:ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
:vartype network_interface_ip_configurations: azure.mgmt.network.v2018_08_01.operations.NetworkInterfaceIPConfigurationsOperations
:ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
:vartype network_interface_load_balancers: azure.mgmt.network.v2018_08_01.operations.NetworkInterfaceLoadBalancersOperations
:ivar network_interface_tap_configurations: NetworkInterfaceTapConfigurationsOperations operations
:vartype network_interface_tap_configurations: azure.mgmt.network.v2018_08_01.operations.NetworkInterfaceTapConfigurationsOperations
:ivar network_profiles: NetworkProfilesOperations operations
:vartype network_profiles: azure.mgmt.network.v2018_08_01.operations.NetworkProfilesOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2018_08_01.operations.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2018_08_01.operations.SecurityRulesOperations
:ivar default_security_rules: DefaultSecurityRulesOperations operations
:vartype default_security_rules: azure.mgmt.network.v2018_08_01.operations.DefaultSecurityRulesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2018_08_01.operations.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2018_08_01.operations.PacketCapturesOperations
:ivar connection_monitors: ConnectionMonitorsOperations operations
:vartype connection_monitors: azure.mgmt.network.v2018_08_01.operations.ConnectionMonitorsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.network.v2018_08_01.operations.Operations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2018_08_01.operations.PublicIPAddressesOperations
:ivar public_ip_prefixes: PublicIPPrefixesOperations operations
:vartype public_ip_prefixes: azure.mgmt.network.v2018_08_01.operations.PublicIPPrefixesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2018_08_01.operations.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2018_08_01.operations.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2018_08_01.operations.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2018_08_01.operations.RoutesOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2018_08_01.operations.BgpServiceCommunitiesOperations
:ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
:vartype service_endpoint_policies: azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPoliciesOperations
:ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
:vartype service_endpoint_policy_definitions: azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2018_08_01.operations.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2018_08_01.operations.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2018_08_01.operations.SubnetsOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2018_08_01.operations.VirtualNetworkPeeringsOperations
:ivar virtual_network_taps: VirtualNetworkTapsOperations operations
:vartype virtual_network_taps: azure.mgmt.network.v2018_08_01.operations.VirtualNetworkTapsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2018_08_01.operations.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2018_08_01.operations.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2018_08_01.operations.LocalNetworkGatewaysOperations
:ivar virtual_wans: VirtualWansOperations operations
:vartype virtual_wans: azure.mgmt.network.v2018_08_01.operations.VirtualWansOperations
:ivar vpn_sites: VpnSitesOperations operations
:vartype vpn_sites: azure.mgmt.network.v2018_08_01.operations.VpnSitesOperations
:ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
:vartype vpn_sites_configuration: azure.mgmt.network.v2018_08_01.operations.VpnSitesConfigurationOperations
:ivar virtual_hubs: VirtualHubsOperations operations
:vartype virtual_hubs: azure.mgmt.network.v2018_08_01.operations.VirtualHubsOperations
:ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
:vartype hub_virtual_network_connections: azure.mgmt.network.v2018_08_01.operations.HubVirtualNetworkConnectionsOperations
:ivar vpn_gateways: VpnGatewaysOperations operations
:vartype vpn_gateways: azure.mgmt.network.v2018_08_01.operations.VpnGatewaysOperations
:ivar vpn_connections: VpnConnectionsOperations operations
:vartype vpn_connections: azure.mgmt.network.v2018_08_01.operations.VpnConnectionsOperations
:ivar p2_svpn_server_configurations: P2SVpnServerConfigurationsOperations operations
:vartype p2_svpn_server_configurations: azure.mgmt.network.v2018_08_01.operations.P2SVpnServerConfigurationsOperations
:ivar p2_svpn_gateways: P2SVpnGatewaysOperations operations
:vartype p2_svpn_gateways: azure.mgmt.network.v2018_08_01.operations.P2SVpnGatewaysOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_security_groups = ApplicationSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_delegations = AvailableDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_resource_group_delegations = AvailableResourceGroupDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewalls = AzureFirewallsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewall_fqdn_tags = AzureFirewallFqdnTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_protection_plans = DdosProtectionPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_endpoint_services = AvailableEndpointServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_gateways = ExpressRouteGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_connections = ExpressRouteConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports_locations = ExpressRoutePortsLocationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports = ExpressRoutePortsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_links = ExpressRouteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.interface_endpoints = InterfaceEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.inbound_nat_rules = InboundNatRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_outbound_rules = LoadBalancerOutboundRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_probes = LoadBalancerProbesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_tap_configurations = NetworkInterfaceTapConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_profiles = NetworkProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.default_security_rules = DefaultSecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.connection_monitors = ConnectionMonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_prefixes = PublicIPPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_taps = VirtualNetworkTapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_wans = VirtualWansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites = VpnSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites_configuration = VpnSitesConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hubs = VirtualHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_gateways = VpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_connections = VpnConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_server_configurations = P2SVpnServerConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_gateways = P2SVpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
    def close(self):
        # type: () -> None
        """Close the underlying pipeline client."""
        self._client.close()
    def __enter__(self):
        # type: () -> NetworkManagementClient
        """Enter the pipeline client's context and return this client."""
        self._client.__enter__()
        return self
    def __exit__(self, *exc_details):
        # type: (Any) -> None
        """Delegate context exit (including any exception info) to the pipeline client."""
        self._client.__exit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/_network_management_client.py | Python | mit | 29,817 |
#!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
# This program implements a topology likes below:
# pem: physical endpoint manager, implemented as a bpf program
#
# vm1 <--------+ +----> bridge1 <----+
# V V V
# pem router
# ^ ^ ^
# vm2 <--------+ +----> bridge2 <----+
#
# The vm1, vm2 and router are implemented as namespaces.
# The linux bridge device is used to provide bridge functionality.
# pem bpf will be attached to the related network devices for vm1, vm2, bridge1 and bridge2.
#
# vm1 and vm2 are in different subnet. For vm1 to communicate to vm2,
# the packet will have to travel from vm1 to pem, bridge1, router, bridge2, pem, and
# then come to vm2.
#
# When this test is run with verbose mode (ctest -R <test_name> -V),
# the following printout is observed on my local box:
#
# ......
# 9: PING 200.1.1.1 (200.1.1.1) 56(84) bytes of data.
# 9: 64 bytes from 200.1.1.1: icmp_req=1 ttl=63 time=0.090 ms
# 9: 64 bytes from 200.1.1.1: icmp_req=2 ttl=63 time=0.032 ms
# 9:
# 9: --- 200.1.1.1 ping statistics ---
# 9: 2 packets transmitted, 2 received, 0% packet loss, time 999ms
# 9: rtt min/avg/max/mdev = 0.032/0.061/0.090/0.029 ms
# 9: [ ID] Interval Transfer Bandwidth
# 9: [ 5] 0.0- 1.0 sec 3.80 GBytes 32.6 Gbits/sec
# 9: Starting netserver with host 'IN(6)ADDR_ANY' port '12865' and family AF_UNSPEC
# 9: MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo
# 9: Recv Send Send
# 9: Socket Socket Message Elapsed
# 9: Size Size Size Time Throughput
# 9: bytes bytes bytes secs. 10^6bits/sec
# 9:
# 9: 87380 16384 65160 1.00 39940.46
# 9: MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 200.1.1.1 (200.1.1.1) port 0 AF_INET : demo : first burst 0
# 9: Local /Remote
# 9: Socket Size Request Resp. Elapsed Trans.
# 9: Send Recv Size Size Time Rate
# 9: bytes Bytes bytes bytes secs. per sec
# 9:
# 9: 16384 87380 1 1 1.00 46387.80
# 9: 16384 87380
# 9: .
# 9: ----------------------------------------------------------------------
# 9: Ran 1 test in 7.495s
# 9:
# 9: OK
from ctypes import c_uint
from bcc import BPF
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
import sys
from time import sleep
from unittest import main, TestCase
import subprocess
from simulation import Simulation
arg1 = sys.argv.pop(1)
ipr = IPRoute()
ipdb = IPDB(nl=ipr)
sim = Simulation(ipdb)
allocated_interfaces = set(ipdb.interfaces.keys())
def get_next_iface(prefix):
i = 0
while True:
iface = "{0}{1}".format(prefix, i)
if iface not in allocated_interfaces:
allocated_interfaces.add(iface)
return iface
i += 1
class TestBPFSocket(TestCase):
def setup_br(self, br, veth_rt_2_br, veth_pem_2_br, veth_br_2_pem):
# create veth which connecting pem and br
with ipdb.create(ifname=veth_pem_2_br, kind="veth", peer=veth_br_2_pem) as v:
v.up()
ipdb.interfaces[veth_br_2_pem].up().commit()
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + veth_pem_2_br + ".disable_ipv6=1"])
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + veth_br_2_pem + ".disable_ipv6=1"])
# set up the bridge and add router interface as one of its slaves
with ipdb.create(ifname=br, kind="bridge") as br1:
br1.add_port(ipdb.interfaces[veth_pem_2_br])
br1.add_port(ipdb.interfaces[veth_rt_2_br])
br1.up()
subprocess.call(["sysctl", "-q", "-w", "net.ipv6.conf." + br + ".disable_ipv6=1"])
def set_default_const(self):
self.ns1 = "ns1"
self.ns2 = "ns2"
self.ns_router = "ns_router"
self.br1 = get_next_iface("br")
self.veth_pem_2_br1 = "v20"
self.veth_br1_2_pem = "v21"
self.br2 = get_next_iface("br")
self.veth_pem_2_br2 = "v22"
self.veth_br2_2_pem = "v23"
self.vm1_ip = "100.1.1.1"
self.vm2_ip = "200.1.1.1"
self.vm1_rtr_ip = "100.1.1.254"
self.vm2_rtr_ip = "200.1.1.254"
self.vm1_rtr_mask = "100.1.1.0/24"
self.vm2_rtr_mask = "200.1.1.0/24"
def attach_filter(self, ifname, fd, name):
ifindex = ipdb.interfaces[ifname].index
ipr.tc("add", "ingress", ifindex, "ffff:")
ipr.tc("add-filter", "bpf", ifindex, ":1", fd=fd, name=name,
parent="ffff:", action="drop", classid=1)
def config_maps(self):
# pem just relays packets between VM and its corresponding
# slave link in the bridge interface
ns1_ifindex = self.ns1_eth_out.index
ns2_ifindex = self.ns2_eth_out.index
br1_ifindex = ipdb.interfaces[self.veth_br1_2_pem].index
br2_ifindex = ipdb.interfaces[self.veth_br2_2_pem].index
self.pem_dest[c_uint(ns1_ifindex)] = c_uint(br1_ifindex)
self.pem_dest[c_uint(br1_ifindex)] = c_uint(ns1_ifindex)
self.pem_dest[c_uint(ns2_ifindex)] = c_uint(br2_ifindex)
self.pem_dest[c_uint(br2_ifindex)] = c_uint(ns2_ifindex)
# tc filter setup with bpf programs attached
self.attach_filter(self.veth_br1_2_pem, self.pem_fn.fd, self.pem_fn.name)
self.attach_filter(self.veth_br2_2_pem, self.pem_fn.fd, self.pem_fn.name)
def test_brb2(self):
try:
b = BPF(src_file=arg1, debug=0)
self.pem_fn = b.load_func("pem", BPF.SCHED_CLS)
self.pem_dest= b.get_table("pem_dest")
self.pem_stats = b.get_table("pem_stats")
# set up the topology
self.set_default_const()
(ns1_ipdb, self.ns1_eth_out, _) = sim._create_ns(self.ns1, ipaddr=self.vm1_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
(ns2_ipdb, self.ns2_eth_out, _) = sim._create_ns(self.ns2, ipaddr=self.vm2_ip+'/24',
fn=self.pem_fn, action='drop',
disable_ipv6=True)
ns1_ipdb.routes.add({'dst': self.vm2_rtr_mask, 'gateway': self.vm1_rtr_ip}).commit()
ns2_ipdb.routes.add({'dst': self.vm1_rtr_mask, 'gateway': self.vm2_rtr_ip}).commit()
(_, self.nsrtr_eth0_out, _) = sim._create_ns(self.ns_router, ipaddr=self.vm1_rtr_ip+'/24',
disable_ipv6=True)
(rt_ipdb, self.nsrtr_eth1_out, _) = sim._ns_add_ifc(self.ns_router, "eth1", "ns_router2",
ipaddr=self.vm2_rtr_ip+'/24',
disable_ipv6=True)
# enable ip forwarding in router ns
nsp = NSPopen(rt_ipdb.nl.netns, ["sysctl", "-w", "net.ipv4.ip_forward=1"])
nsp.wait(); nsp.release()
# for each VM connecting to pem, there will be a corresponding veth connecting to the bridge
self.setup_br(self.br1, self.nsrtr_eth0_out.ifname, self.veth_pem_2_br1, self.veth_br1_2_pem)
self.setup_br(self.br2, self.nsrtr_eth1_out.ifname, self.veth_pem_2_br2, self.veth_br2_2_pem)
# load the program and configure maps
self.config_maps()
# ping
nsp = NSPopen(ns1_ipdb.nl.netns, ["ping", self.vm2_ip, "-c", "2"]); nsp.wait(); nsp.release()
# one arp request/reply, 2 icmp request/reply per VM, total 6 packets per VM, 12 packets total
self.assertEqual(self.pem_stats[c_uint(0)].value, 12)
nsp_server = NSPopen(ns2_ipdb.nl.netns, ["iperf", "-s", "-xSC"])
sleep(1)
nsp = NSPopen(ns1_ipdb.nl.netns, ["iperf", "-c", self.vm2_ip, "-t", "1", "-xSC"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
nsp_server = NSPopen(ns2_ipdb.nl.netns, ["netserver", "-D"])
sleep(1)
nsp = NSPopen(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "--", "-m", "65160"])
nsp.wait(); nsp.release()
nsp = NSPopen(ns1_ipdb.nl.netns, ["netperf", "-l", "1", "-H", self.vm2_ip, "-t", "TCP_RR"])
nsp.wait(); nsp.release()
nsp_server.kill(); nsp_server.wait(); nsp_server.release()
finally:
if self.br1 in ipdb.interfaces: ipdb.interfaces[self.br1].remove().commit()
if self.br2 in ipdb.interfaces: ipdb.interfaces[self.br2].remove().commit()
if self.veth_pem_2_br1 in ipdb.interfaces: ipdb.interfaces[self.veth_pem_2_br1].remove().commit()
if self.veth_pem_2_br2 in ipdb.interfaces: ipdb.interfaces[self.veth_pem_2_br2].remove().commit()
sim.release()
ipdb.release()
if __name__ == "__main__":
main()
| romain-intel/bcc | tests/python/test_brb2.py | Python | apache-2.0 | 9,233 |
from girder_worker.utils import girder_job
from girder_worker.app import app
@girder_job(title='Fibonacci Job')
@app.task
def fibonacci(n, **kwargs):
if n == 1 or n == 2:
return 1
return fibonacci(n-1) + fibonacci(n-2)
@app.task(bind=True)
def fib_seq(self, n, **kwargs):
if n < 0:
raise Exception('Must pass in positive integer!')
for _n in range(1, n+1):
print('%s: %s' % (_n, fibonacci(_n)))
| girder/girder_worker | tests/integration/common_tasks/common_tasks/test_tasks/fib.py | Python | apache-2.0 | 440 |
# Glumol - An adventure game creator
# Copyright (C) 1998-2008 Sylvain Baubeau & Alexis Contour
# This file is part of Glumol.
# Glumol is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# Glumol is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Glumol. If not, see <http://www.gnu.org/licenses/>.
class Font:
font_face = ""
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789&\"'(-_"
scale = (1.0, 1.0)
alpha = 1.0
color = (255, 255, 255, 255)
def __init__(self):
pass
def __init__(self, letters, filename, widths):
self.letters = letters
self.filename = filename
self.widths = widths
def draw(self):
pass
def get_size(self):
return None
| lebauce/artub | pypoujol/font.py | Python | gpl-2.0 | 1,217 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for reduce operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_reduce_tests(reduce_op,
min_value=-10,
max_value=10,
boolean_tensor_only=False,
allow_fully_quantize=False):
"""Make a set of tests to do reduce operation.
Args:
reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
min_value: min value for created tensor data.
max_value: max value for created tensor data.
boolean_tensor_only: If true, will only generate tensor with boolean value.
allow_fully_quantize: bool, whether fully_quantize is allowed.
Returns:
a function representing the true generator with `reduce_op_in` curried.
"""
def f(options):
"""Actual function that generates examples."""
test_parameters = [
{
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True, False],
"keepdims": [True, False],
"fully_quantize": [False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2,
3], [3, 2, 1, 0],
[3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4,
[0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
[2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
"fully_quantize": [False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [[]], # shape is: [0]
"const_axis": [False],
"keepdims": [True, False],
"fully_quantize": [False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
"axis": [None], # shape is: []
"const_axis": [True],
"keepdims": [True, False],
"fully_quantize": [False],
},
{
"input_dtype": [tf.float32],
"input_shape": [[3, 3, 2, 4]],
"axis": [
0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
[-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
"const_axis": [True],
"keepdims": [True, False],
"fully_quantize": [True],
},
{
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 4], [1, 8, 8, 3]],
"axis": [
0, 1, 2, 3, [0], [1], [2], [3], [-1], [-2], [-3], [1, 2],
[0, 3], [1, 2, 3], [1, 3], [2, 3]
],
"const_axis": [True],
"keepdims": [True, False],
"fully_quantize": [True],
},
]
# test_parameters include fully_quantize option only when
# allow_fully_quantize is True.
if not allow_fully_quantize:
test_parameters = [
test_parameter for test_parameter in test_parameters
if True not in test_parameter["fully_quantize"]
]
def build_graph(parameters):
"""Build the mean op testing graph."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
input_tensor = tf.compat.v1.placeholder(
dtype=dtype, name="input", shape=parameters["input_shape"])
# Get axis as either a placeholder or constants.
if parameters["const_axis"]:
axis = parameters["axis"]
input_tensors = [input_tensor]
else:
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
shape = [] # shape for None or integers.
axis = tf.compat.v1.placeholder(
dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
out = reduce_op(input_tensor, axis=axis, keepdims=parameters["keepdims"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Build the inputs for reduced operators."""
dtype = parameters["input_dtype"]
if boolean_tensor_only:
dtype = tf.bool
values = [
create_tensor_data(
dtype,
parameters["input_shape"],
min_value=min_value,
max_value=max_value)
]
if not parameters["const_axis"]:
values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
return f
@register_make_test_function()
def make_mean_tests(options):
"""Make a set of tests to do mean."""
return make_reduce_tests(
tf.reduce_mean,
min_value=-1,
max_value=1,
boolean_tensor_only=False,
allow_fully_quantize=True)(
options)
@register_make_test_function()
def make_sum_tests(options):
"""Make a set of tests to do sum."""
return make_reduce_tests(
tf.reduce_sum,
min_value=-1,
max_value=1,
boolean_tensor_only=False,
allow_fully_quantize=True)(
options)
@register_make_test_function()
def make_reduce_prod_tests(options):
"""Make a set of tests to do prod."""
# set min max value to be -2, 2 to avoid overflow.
return make_reduce_tests(tf.reduce_prod, -2, 2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
"""Make a set of tests to do max."""
return make_reduce_tests(tf.reduce_max)(options)
@register_make_test_function()
def make_reduce_min_tests(options):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(options)
@register_make_test_function()
def make_reduce_any_tests(options):
"""Make a set of tests to do any."""
return make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)(options)
| gunan/tensorflow | tensorflow/lite/testing/op_tests/reduce.py | Python | apache-2.0 | 7,523 |
from Products.CMFCore.utils import getToolByName
def install(context):
site = context.getSite()
form = site['parasite-resources']['requisitionUpdate']['filarial-research-materials-parasite-division-requisition-form']
if not form.hasObject('order-forms'):
folder = form[form.invokeFactory('FormSaveData2ContentAdapter', 'order-forms')]
folder.setTitleField('order-date')
folder.setTitle('Parasite Order Forms')
folder.setActionAdapter(('order-forms','parasite_requisition','custommaileradapter'))
form.data_2_content_script()
catalog = getToolByName(site, 'portal_catalog')
catalog.manage_catalogRebuild()
folder = site['parasite-resources']['requisitionUpdate']
if not folder.hasObject('parasitesearchform'):
link = folder.invokeFactory('Link', 'parasitesearchform')
link = folder[link]
link.setTitle('Parasite Search Form')
link.setRemoteUrl('./order_search_form')
link.setLocation('./order_search_form')
| uwosh/uwosh.filariasis | uwosh/filariasis/importexport.py | Python | gpl-2.0 | 1,058 |
import faker
generator = faker.Factory.create()
def test_imports_bulk_get(api, given, verifier):
# preconditions
total = 10
file_ids = [generator.uuid4() for _ in range(total)]
_import = {
'id': generator.uuid4(),
'result': [
{'resource': {'id': _id, 'href': generator.url()}}
for _id in file_ids
]
}
given.drs_imports.can_be_retrieved_in_bulk(_import)
# action
response = api.drs_imports.bulk_get(_import['id'])
# verification
assert len(response.result) == total
verifier.drs_imports.bulk_retrieved(response.id)
def test_imports_bulk_submit(api, given, verifier):
# preconditions
total = 10
imports = [
{
"drs_uri": generator.name(),
"project": generator.name(),
"metadata": {
generator.name(): generator.name(),
generator.name(): generator.name()
},
"name": generator.name()
}
for _ in range(total)
]
tags = [generator.name()]
given.drs_imports.can_be_submitted_in_bulk(imports)
# action
response = api.drs_imports.bulk_submit(imports, tags)
# verification
assert len(response.result) == total
verifier.drs_imports.bulk_submitted()
| sbg/sevenbridges-python | tests/test_drs_import.py | Python | apache-2.0 | 1,299 |
"""
Base service for authenticating rinzler's requests
"""
from django.http.request import HttpRequest
class BaseAuthService(object):
"""
BaseAuthService
"""
auth_data: dict = {} # Your authenticated data goes here
def authenticate(self, request: HttpRequest, auth_route: str, actual_params: dict) -> bool:
"""
Your AuhtService should override this method for request authentication, otherwise means no authentication.
:param request: HttpRequest Django's HttpRequest object
:param auth_route: str User's resqueted route
:param actual_params: User's url parameters
:return: bool
"""
if auth_route and actual_params:
self.auth_data = {}
return True
| feliphebueno/Rinzler | rinzler/auth/base_auth_service.py | Python | mit | 756 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2021, Shuup Commerce Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.db.models import Avg, Count, Sum
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import Contact
from shuup.default_reports.forms import CustomerSalesReportForm
from shuup.default_reports.mixins import OrderReportMixin
from shuup.reports.report import ShuupReportBase
class CustomerSalesReport(OrderReportMixin, ShuupReportBase):
identifier = "customer_sales_report"
title = _("Customer Sales")
form_class = CustomerSalesReportForm
filename_template = "customer-sales-report-%(time)s"
schema = [
{"key": "customer", "title": _("Customer")},
{"key": "order_count", "title": _("Orders")},
{"key": "average_sales", "title": _("Average Sales")},
{"key": "taxless_total", "title": _("Taxless Total")},
{"key": "taxful_total", "title": _("Taxful Total")},
]
def get_objects(self):
return (
Contact.objects.filter(customer_orders__in=super(CustomerSalesReport, self).get_objects())
.annotate(
order_count=Count("customer_orders", distinct=True),
average_sales=Avg("customer_orders__taxful_total_price_value"),
taxless_total=Sum("customer_orders__taxless_total_price_value"),
taxful_total=Sum("customer_orders__taxful_total_price_value"),
)
.filter(order_count__gt=0)
.order_by("-%s" % self.options["order_by"])[: self.queryset_row_limit]
.values("name", "order_count", "average_sales", "taxless_total", "taxful_total")
)
def get_data(self):
data = []
for contact in self.get_objects():
data.append(
{
"customer": contact["name"],
"order_count": contact["order_count"],
"average_sales": self.shop.create_price(contact["average_sales"]),
"taxless_total": self.shop.create_price(contact["taxless_total"]),
"taxful_total": self.shop.create_price(contact["taxful_total"]),
}
)
return self.get_return_data(data)
| shoopio/shoop | shuup/default_reports/reports/customer_sales.py | Python | agpl-3.0 | 2,407 |
#!/usr/bin/
#-*- coding: iso-8859-1 -*-
#===============================================================================
# __________ ________________ __ _______
# / ____/ __ \/ ____/ ____/ __ )/ / / / ___/
# / /_ / /_/ / __/ / __/ / __ / / / /\__ \
# / __/ / _, _/ /___/ /___/ /_/ / /_/ /___/ /
# /_/ /_/ |_/_____/_____/_____/\____//____/
#
#Source File: FB_XMLDataModel.py
#Version: V0.1 , 22.01.2008
#Author: Jerome Leisner
#email: j.leisner@ing-automation.de
#===============================================================================
import xml.dom
#from xml.dom import dom
#from xml.dom.minidom import *
##general class for handling project data which are based on XML
class FB_XMLDataModel:
__LogObj = None
__DOMObj = None
##Constructor for an empty
#@param LogObj: Log-File-Object to log all events within this inctance
#@param projectname: Path and name of project
def __init__(self,LogObj, Document, projectname):
self.__LogObj = LogObj
#create an DOMObj
self.__DOMObj = Document
##return the current DOM-Object
def getDOMObj(self):
return self.__DOMObj
##sets a document
def setDOMObj(self,document):
self.__DOMObj = document
#remove data with the given ID
def removeData(self,ID):
Node=self.getDataRootNode(ID)
Parent = Node.ParentNode()
Parent.removeChild(Node)
#get the name of a Node
def getName(self,ID):
Node = self.getDataRootNode(ID)
return self.readDOMNodeValue(Node, "name" )
#set the name of Node
def setName(self,ID,Name):
Node = self.getDataRootNode(ID)
self.writeDOMNodeValue(Node, "name", Name)
#get comment
def getComment(self, ID):
Node = self.getDataRootNode(ID)
return self.readDOMNodeValue(Node, "comment")
#set comment
def setComment(self,ID,Comment):
Node = self.getDataRootNode(ID)
self.writeDOMNodeValue(Node,"comment", comment)
##Return the id list for all child nodes with namen name from the given stat node
#@param node the start node
#@param childname the child node name
#@return the id String collection
def getIDList(self,Node,ChildName):
if(Node <> None):
NodeList = Node.getElementsByTagName(ChildName)
IDList = []
for i in range(len(NodeList)):
Element = NodeList.item(i)
if(Element.hasAttribute("id")):
IDList.append(Element.getAttribute("id"))
return IDList
else:
self.__LogObj.NewLog("Error at getIDList, Parameter Node is None",1)
return None
##Create a new standard child data node. This node includes a name with id and a comment tag.
#@param tagname the child root tag name
#@return the DOM Element
def createChild(self,TagName):
Element = self.__DOMObj.createElement(TagName)
id = tagname+"-" + self.getNewID(TagName) # get id for the child
Element.setAttribute("id", id)
Element.appendChild(self.__DOMObj.createElement("name"))
Element.appendChild(self.__DOMObj.createElement("comment"))
return Element
##@return the child id for the given child node
def getChildID(self, ChildNode):
return ChildNode.getAttribute("id")
##Returns an ID number for a new child node
def getNewID(self, TagName):
NodeList = self.__DOMObj.getElementsByTagName(TagName)
# calculate maximum value
max=0
for i in range(len(NodeList)):
Element = NodeList.item(i)
if(Element.hasAttribute("id")):
idstr = Element.getAttribute("id")
prefix = TagName + "-"
number = idstr.replace(prefix, "")
if(int(number) > max):
max = int(number)
return max + 1 # return maximum + 1
##Returns the root node with the given id
def getDataRootNode(self,ID):
drNode = None
end = ID.find('-') #find first character "-"
NodeName = ""
if(end <> -1):
NodeName = ID[0: end]
else:
NodeName = ID
#print self.__DOMObj
NodeList = self.__DOMObj.getElementsByTagName(NodeName)
#print NodeList
if(len(NodeList) > 0):
for i in range(len(NodeList)):
Node = NodeList.item(i)
#attr = Node.getAttribute("id")
attr = Node.attributes
if(attr <> None):
idNode = None
idNode = attr.getNamedItem("id")
#print idNode.nodeValue
if(idNode <> None):
if(idNode.nodeValue == ID):
drNode = Node
break
return drNode
#Writes the node value for the node given in the StringTokenizer path. If no such node exists it will created.
#@param n path root node
#@param path path to the node
#@param value the value that will be written
def writeDOMNodeValue(self, Node, Path, Value):
self.writeNodeValue(Node, Path, Value)
self.__DOMObj.normalize()
#Recursively DFS method for writing node values. If no such nod exists the method creates them.
def writeNodeValue(self, Node, Path, Value):
#there are no entries
if(Node.childNodes == None):
print "look at writeNodeValue..."
#if(len(Node.childNodes) == 0):
# NewNode = Node.appendChild(self.__DOMObj.createElement(Path))
# print NewNode
#self.writeNodeValue(NewNode, Path, Value)
else:
NodeList = Node.childNodes
if(len(NodeList) > 0):
found = False
if(NodeList[0].nodeType == Node.ELEMENT_NODE):
for i in range(len(NodeList)):
#check if Node already exist
if(NodeList.item(i).nodeName == Path):
found = True
self.writeNodeValue(NodeList.item(i), Path, Value)
break
if(found == False):
if(Node.nodeType == Node.ELEMENT_NODE):
#print Node.parentNode
#Child = Node.documentElement.createElement(Path)
#NewNode = Node.appendChild(Child)
#self.writeNodeValue(NewNode, Path, Value)
pass
#Nodetyp TEXT -> found correct level
else:
Node.firstChild.data = unicode(Value,"ISO-8859-1")
return
#Len of NodeList = 0 -> writable Node doesnt exist
else:
if(Node.hasChildNodes == True):
if(Node.childNodes.item(0).nodeType == Node.TEXT_NODE):
Node.firstChild.data = unicode(Value,"ISO-8859-1")
return
else:
Node.appendChild(self.__DOMObj.createTextNode(unicode(Value,"ISO-8859-1")))
return
else:
Node.appendChild(self.__DOMObj.createTextNode(unicode(Value,"ISO-8859-1")))
return
##BFS method for reading node values
def readDOMNodeValue(self, Node, path):
#NodeList contains all childnodes within Parent Node "Node"
NodeList = Node.childNodes
if(len(NodeList) > 0):
cnt = 0
for cnt in range(len(NodeList)):
if(NodeList.item(cnt).nodeName == path):
#get NodeObject with given path
actualVisited = NodeList.item(cnt)
break
else:
return ""
#are there more subnodes of given path-node?
if(actualVisited.hasChildNodes()):
if(actualVisited.firstChild.nodeType == Node.TEXT_NODE):
Value = actualVisited.firstChild.data
return Value.encode("ISO-8859-1")
else:
return ""
return "" | Makki1/old-svn | avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/src/build/XML/FB_XMLDataModel.py | Python | gpl-3.0 | 8,588 |
"""Tests for the humidifier intents."""
from homeassistant.components.humidifier import (
ATTR_AVAILABLE_MODES,
ATTR_HUMIDITY,
DOMAIN,
SERVICE_SET_HUMIDITY,
SERVICE_SET_MODE,
intent,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.intent import IntentHandleError
from tests.common import async_mock_service
async def test_intent_set_humidity(hass):
"""Test the set humidity intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40}
)
humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_HUMIDITY,
{"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}},
)
await hass.async_block_till_done()
assert result.speech["plain"]["speech"] == "The bedroom humidifier is set to 50%"
assert len(turn_on_calls) == 0
assert len(humidity_calls) == 1
call = humidity_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_HUMIDITY
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_HUMIDITY) == 50
async def test_intent_set_humidity_and_turn_on(hass):
"""Test the set humidity intent for turned off humidifier."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_OFF, {ATTR_HUMIDITY: 40}
)
humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_HUMIDITY,
{"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "Turned bedroom humidifier on and set humidity to 50%"
)
assert len(turn_on_calls) == 1
call = turn_on_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert len(humidity_calls) == 1
call = humidity_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_HUMIDITY
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_HUMIDITY) == 50
async def test_intent_set_mode(hass):
"""Test the set mode intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_ON,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "The mode for bedroom humidifier is set to away"
)
assert len(turn_on_calls) == 0
assert len(mode_calls) == 1
call = mode_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_MODE
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_MODE) == "away"
async def test_intent_set_mode_and_turn_on(hass):
"""Test the set mode intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_OFF,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "Turned bedroom humidifier on and set away mode"
)
assert len(turn_on_calls) == 1
call = turn_on_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert len(mode_calls) == 1
call = mode_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_MODE
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_MODE) == "away"
async def test_intent_set_mode_tests_feature(hass):
"""Test the set mode intent where modes are not supported."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40}
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
await intent.async_setup_intents(hass)
try:
await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
assert False, "handling intent should have raised"
except IntentHandleError as err:
assert str(err) == "Entity bedroom humidifier does not support modes"
assert len(mode_calls) == 0
async def test_intent_set_unknown_mode(hass):
"""Test the set mode intent for unsupported mode."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_ON,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
await intent.async_setup_intents(hass)
try:
await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "eco"}},
)
assert False, "handling intent should have raised"
except IntentHandleError as err:
assert str(err) == "Entity bedroom humidifier does not support eco mode"
assert len(mode_calls) == 0
| jawilson/home-assistant | tests/components/humidifier/test_intent.py | Python | apache-2.0 | 6,859 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Blackbox Hardware Driver',
'category': 'Point of Sale',
'sequence': 6,
'summary': 'Hardware Driver for Belgian Fiscal Data Modules',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Fiscal Data Module Hardware Driver
==================================
This module allows a Point Of Sale client to communicate with a
connected Belgian Fiscal Data Module.
This module does **not** turn an Odoo Point Of Sale module into a certified
Belgian cash register. It allows the communication on with a certified Fiscal
Data Module but will not modify the behaviour of the Point of Sale.
""",
'depends': ['hw_proxy'],
'external_dependencies': {'python': ['serial']},
}
| chienlieu2017/it_management | odoo/addons/hw_blackbox_be/__manifest__.py | Python | gpl-3.0 | 826 |
#pylint: disable=E1101,W0703,W0212,W0232, R0914,R0915,W0105
"""
Package.py
Class to handle the downloading of the package
"""
import urlparse
import re
import json
import os
import fnmatch
from agent.lib.utils import md5sum
from agent.lib.errors import Errors, PackagePathError, PackageScriptNotFound
from agent.lib.errors import AgentException
import logging
import pylons
from agent.lib.agent_thread.exec_thread import ExecThread
from agent.lib.contextutils import copycontexts
from agent.lib import utils, configutil
LOG = logging.getLogger("gc")
class PackageControl:
    """Abstraction over an installed package that lets the agent locate and
    execute the package's lifecycle scripts (kept under cronus/scripts).
    """

    # relative location of lifecycle scripts inside a package
    SCRIPT_PATH = os.path.join('cronus', 'scripts')

    def __init__(self, packagePath, threadMgr):
        """Validate the package location and remember collaborators.

        @param packagePath: path to the package root directory
        @param threadMgr: thread manager used to execute package scripts
        @raise PackagePathError: when packagePath is not an existing directory
        """
        if not os.path.isdir(packagePath):
            raise PackagePathError("package path '%s' not found" % packagePath)
        self.__packagePath = packagePath
        self.__threadMgr = threadMgr
        # user the scripts should run as (empty/None means current user)
        self.__userName = configutil.getAppUser()

    def __scriptPath(self, script):
        """Return the absolute path of the named lifecycle script.

        @param script: script name
        @return: path to script
        """
        return os.path.join(self.__packagePath, PackageControl.SCRIPT_PATH, script)

    def hasScript(self, script):
        """Return True iff <packagePath>/cronus/scripts/<script> is a regular file."""
        return os.path.isfile(self.__scriptPath(script))

    def runScript(self, script, timeout, progressTimeout):
        """Launch a lifecycle script on a background execution thread.

        @param script: script name
        @param timeout: total script timeout
        @param progressTimeout: progress timeout
        @return: the started ExecThread instance
        @raise PackageScriptNotFound: if the script does not exist
        """
        if not self.hasScript(script):
            raise PackageScriptNotFound('missing package script: ' + self.__scriptPath(script))

        # run through sudo when a dedicated application user is configured
        if self.__userName:
            cmd = utils.sudoCmd([], self.__userName)
        else:
            cmd = []
        cmd.append(self.__scriptPath(script))

        worker = ExecThread(self.__threadMgr, cmd)
        worker.setTimeout(timeout)
        worker.setProgressTimeout(progressTimeout)
        # propagate request-scoped context onto the worker thread
        copycontexts(self, worker, ['guid', 'service'])
        worker.start()
        return worker
class PackageUtil:
    ''' Static helpers for validating, naming, and cleaning up cronus packages. '''
    # matches "<name>-<major>.<minor>.<build>.<platform>.cronus"
    # groups: 1=full file name, 2=name-version, 3=name, 4=version,
    #         5=major, 6=minor, 7=build, 8=platform
    nameRe = re.compile('((([a-zA-Z0-9_]+)-(([0-9]+)\.([0-9]+)\.([a-zA-Z0-9_]+)\.([a-zA-Z0-9_]+)))\.cronus)')
    # suffix marking a download that has not completed yet
    inProgressExt = '.inprogress'

    @staticmethod
    def validateDownload(obj, *args, **kwargs):
        """ used by urlgrabber to check the files
        @param obj: download result object carrying a `filename` attribute
        @raise AgentException: when the prop file or the package fails validation
        """
        # a .prop file only needs its own structural validation
        if (obj.filename.rpartition('.')[2] == 'prop'):
            if (PackageUtil.validateProp(obj.filename) == False):
                raise AgentException(Errors.INVALID_PACKAGE, 'Package prop (%s) not valid' % obj.filename)
            return
        # figure the prop file name from the filename
        if (obj.filename.rpartition('.')[2] == 'inprogress'):
            propFilename = obj.filename.rpartition('.')[0] + '.prop'
        else:
            propFilename = obj.filename + '.prop'
        if (PackageUtil.validatePackage(obj.filename, propFilename) == False):
            raise AgentException(Errors.INVALID_PACKAGE, 'Package (%s) not valid' % obj.filename)

    @staticmethod
    def validateProp(filename):
        """ checks that the properties file exists, parses as JSON, and
        carries all the keys needed to verify the package.
        @param filename: path to the .prop file
        @return: a boolean - True when the prop file is valid
        """
        # does the file exists
        if (not os.path.exists(filename)):
            LOG.warning('Prop file (%s) does not exist' % (filename))
            return False
        # can I read it
        try:
            propFile = open(filename, 'r')
            prop = json.load(propFile)
            propFile.close()
        except (ValueError, OSError):
            LOG.warning('Prop file (%s) unable to read or did not parse' % (filename))
            return False
        # does the prop have the correct value
        for key in ('name', 'md5', 'description', 'size', 'contact'):
            if (key not in prop):
                LOG.warning('Prop file (%s) missing key (%s)' % (filename, key))
                return False
        return True

    @staticmethod
    def validatePackage(filename, propFilename = None):
        """ validate the package against its .prop file (size and md5 must match)
        @param filename: path to the package file
        @param propFilename: path to the prop file; defaults to filename + '.prop'
        @return: True or False """
        if (propFilename == None):
            propFilename = filename + '.prop'
        if (not PackageUtil.validateProp(propFilename)):
            return False
        try:
            # check that the file exists
            if (not os.path.exists(filename)):
                LOG.warning('Package (%s) does not exists' % (filename))
                return False
            # load in the prop file
            propFile = open(propFilename, 'r')
            prop = json.load(propFile)
            propFile.close()
            # size on disk must match the advertised size exactly
            size = os.path.getsize(filename)
            if (size != int(prop['size'])):
                LOG.warning('package size = %s : %s' % (str(size), str(prop['size'])))
                return False
            # checksum must match as well
            md5Sum = md5sum(filename)
            propmd5 = prop['md5']
            if (md5Sum != propmd5):
                LOG.warning('package md5 = %s : %s' % (md5Sum, prop['md5']))
                return False
            # make sure the tar file has the expected structure
            # TPY to do after we fix the cronus-deploy
        except Exception, excep:
            LOG.error('validatePackage exception %s' % excep)
            return False
        return True

    @staticmethod
    def getPackageKey(packageUri):
        """ parse uri and get uniquely identifier of the package
        (the full '<name>-<version>.cronus' file name) """
        uriDict = PackageUtil.parseUri(packageUri)
        return uriDict['package']

    @staticmethod
    def getPackageVersion(packageUri):
        """ parse uri and get version number (e.g. '1.2.0.unix') """
        uriDict = PackageUtil.parseUri(packageUri)
        return uriDict['packageVersion']

    @staticmethod
    def parseUri(packageUri, path = None, packageloc = None):
        """ static method to parse the uri into a dictionary of derived
        names and filesystem paths used throughout the package manager
        @param packageUri: http uri of the package
        @param path: base directory for package files; defaults to PackageMgr.packagePath()
        @param packageloc: override for the package file name; defaults to the uri basename
        @return: dict with uri parts, package name/version components and local paths
        @throws - AgentException
        """
        if (path == None):
            # local import to avoid a circular module dependency
            from agent.lib.packagemgr import PackageMgr
            path = PackageMgr.packagePath()
        uri = urlparse.urlparse(packageUri)
        # only http is supported
        if (uri.scheme != 'http'):
            raise AgentException(Errors.PACKAGE_SCHEME_ERROR,
                                 'uri (%s) scheme(%s) not supported' % (packageUri, uri.scheme))
        if (uri.path == ''):
            raise AgentException(Errors.PACKAGE_PATH_ERROR,
                                 'uri (%s) path is empty' % (packageUri))
        # now parse the path. get the name and then verify that it matches the convention
        if packageloc is not None:
            packName = packageloc
        else:
            packName = uri.path.rpartition('/')[2]
        # the whole name must match nameRe, not just a prefix
        match = PackageUtil.nameRe.match(packName)
        if (match == None or match.group(0) != packName):
            raise AgentException(Errors.PACKAGE_PATH_ERROR,
                                 'cannot find package name in path %s' % (uri.path))
        # ok now we can fill out the dictionary
        uriDict = {}
        uriDict['uri'] = packageUri
        uriDict['scheme'] = uri.scheme
        uriDict['uripath'] = uri.path
        uriDict['package'] = match.group(1)
        uriDict['packageNameVersion'] = match.group(2)
        uriDict['inProgressPackage'] = PackageUtil.inProgressPath(uriDict['package'])
        uriDict['packageName'] = match.group(3)
        uriDict['propName'] = uriDict['package'] + '.prop'
        uriDict['packageVersion'] = match.group(4)
        uriDict['packageVersionMajor'] = match.group(5)
        uriDict['packageVersionMinor'] = match.group(6)
        uriDict['packageVersionBuild'] = match.group(7)
        uriDict['packagePlat'] = match.group(8)
        # path specific attributes
        uriDict['packagePath'] = os.path.join(path, uriDict['package'])
        uriDict['inProgressPackagePath'] = os.path.join(path, uriDict['inProgressPackage'])
        uriDict['propPath'] = os.path.join(path, uriDict['propName'])
        # calculate prop url
        # append path with .prop - mons: leaving the below code in place in case we support other protocols in future
        uriScheme = uri.scheme
        if (uriScheme != "http"): #only use http to download .prop and .torrent files
            uriScheme = "http"
        uripath = uri.path
        propParseResult = urlparse.ParseResult(uriScheme, uri.netloc, uripath + '.prop',
                                               uri.params, uri.query, uri.fragment)
        propUri = urlparse.urlunparse(propParseResult)
        uriDict['propUri'] = propUri
        return uriDict

    @staticmethod
    def inProgressPath(packagePath):
        """ return the path to the inprogress name """
        return packagePath + PackageUtil.inProgressExt

    @staticmethod
    def isInProgressPath(packagePath):
        """ return whether a package in in progress or not """
        return packagePath.endswith(PackageUtil.inProgressExt)

    @staticmethod
    def cleanUpPackage(inProgressFilename, packageFilename, propFilename):
        """ attempt to remove all traces of this package; best effort -
        failures are only logged """
        try:
            for filename in (inProgressFilename, packageFilename, propFilename):
                if (filename is not None and os.path.exists(filename)):
                    os.remove(filename)
        except OSError, osErr :
            LOG.error('Unable to cleanup Package (%s)' % osErr)

    @staticmethod
    def getAllInstalledPackages(installedPkgPath):
        """returns the list of paths of all the packages in installed-packages folder
        expanding till the version level. e.g. pkgA-0.6.0.unix.cronus will have a
        pkgA and 0.6.0.unix as its child. This method returns paths till the version
        as a pkgA can have many versions as its children.
        In-progress versions (*.inprogress) are excluded.
        """
        allPkgVers = []
        if os.path.exists(installedPkgPath):
            for pkg in os.listdir(installedPkgPath):
                pkgVersions = os.listdir(os.path.join(installedPkgPath, pkg))
                for pkgVersion in pkgVersions:
                    pkgPath = os.path.join(installedPkgPath, pkg)
                    if not fnmatch.fnmatch(pkgVersion, '*.inprogress'):
                        allPkgVers.append(os.path.join(pkgPath, pkgVersion))
        return allPkgVers

    @staticmethod
    def cleanupInstalledPkgs(installedPkgPath, orphanPkgs):
        '''removes folders under installed-packages which does not have
        any manifest reference, provided they are older than the configured
        minimum age (packageMgr_install_package_min_age)'''
        # local imports keep module load light; ServiceDelete does the recursive removal
        from agent.lib.agent_thread.service_delete import ServiceDelete
        import time
        for pkg in orphanPkgs:
            if (os.path.exists(pkg) and (time.time() > (os.path.getctime(pkg) + float(pylons.config['packageMgr_install_package_min_age'])))):
                parentPkg = os.path.dirname(pkg)
                try :
                    LOG.info('Garbage collecting folder contents of package %s' % pkg)
                    ServiceDelete.deleteFolderContents(pkg)
                    # also remove the parent package dir once its last version is gone
                    if os.listdir(parentPkg).__len__() <= 0:
                        ServiceDelete.deleteFolderContents(parentPkg)
                        LOG.info('attempting to delete folder contents of package %s' % parentPkg)
                except Exception as ex:
                    LOG.error('Unable to garbage collect %s - %s' % (pkg, ex))
            LOG.info('Completed cleanup Installed pkg %s' % pkg)

    @staticmethod
    def cleanupPackages(orphanpkgs):
        '''removes folders under installed-packages which does not have
        any manifest reference.
        NOTE(review): the original docstring claimed "Age is not a factor for
        cleanup", but the code below does apply the same min-age check as
        cleanupInstalledPkgs - confirm which behavior is intended.
        Need to check for packages of interest(packages for which create is going on etc.
        '''
        from agent.lib.agent_thread.service_delete import ServiceDelete
        import time
        for pkg in orphanpkgs:
            LOG.debug('attempting to cleanup Installed pkg %s' % pkg)
            if (os.path.exists(pkg) and (time.time() > (os.path.getctime(pkg) + float(pylons.config['packageMgr_install_package_min_age'])))):
                parentPkg = os.path.dirname(pkg)
                try :
                    LOG.debug('attempting to delete folder contents of package %s' % pkg)
                    ServiceDelete.deleteFolderContents(pkg)
                    # remove the now-empty parent directory as well
                    if os.listdir(parentPkg).__len__() <= 0:
                        ServiceDelete.deleteFolderContents(parentPkg)
                        LOG.debug('attempting to delete folder contents of package %s' % parentPkg)
                except Exception as ex:
                    LOG.error('Unable to garbage collect %s - %s' % (pkg, ex))
| cronuspaas/cronusagent | agent/agent/lib/package.py | Python | apache-2.0 | 12,966 |
"""
WSGI config for iam_manager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Point Django at the project settings module before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "iam_manager.settings")

application = get_wsgi_application()
# Wrap the WSGI app so WhiteNoise can serve static files without a separate server.
application = DjangoWhiteNoise(application)
| MicroPyramid/IAM-Manager | iam_manager/wsgi.py | Python | mit | 490 |
#!/usr/bin/env python3
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Generate a test html snippet for a family hosted on fonts.google.com
This script works well for quickly producing test cases using jsbin.
$ fontbakery family-html-snippet "Exo" "Hello World"
>>> ...
<html>
<head>
<link href="https://fonts.googleapis.com/css?family=Exo"/rel="stylesheet">
<style>
.g{font-family: 'Exo'; font-weight:400;}
.h{font-family: 'Exo'; font-weight:400; font-style: italic;}
.m{font-family: 'Exo'; font-weight:700;}
.n{font-family: 'Exo'; font-weight:700; font-style: italic;}
</style>
</head>
<body>
<p class='g'>Hello World</p>
<p class='h'>Hello World</p>
<p class='m'>Hello World</p>
<p class='n'>Hello World</p>
</body>
</html>
"""
from __future__ import print_function
import json
import requests
import sys
from argparse import (ArgumentParser,
RawTextHelpFormatter)
# Google Fonts Developer API endpoint; {} is filled with the API key.
GF_API = "https://www.googleapis.com/webfonts/v1/webfonts?key={}"

# Maps variant names as returned by the GF API ("regular", "700italic", ...)
# to CSS-API weight tokens ("400", "700i", ...).
GF_API_WEIGHT_TO_CSS_WEIGHT = {
    "100": "100",
    "100italic": "100i",
    "200": "200",
    "200italic": "200i",
    "300": "300",
    "300italic": "300i",
    "regular": "400",
    "italic": "400i",
    "500": "500",
    "500italic": "500i",
    "600": "600",
    "600italic": "600i",
    "700": "700",
    "700italic": "700i",
    "800": "800",
    "800italic": "800i",
    "900": "900",
    "900italic": "900i"
}

# Maps CSS weight tokens to the single-letter class names used in the
# generated snippet's <style> rules and <p> tags.
API_TO_CSS_STYLE_NAME = {
    "100": "a",
    "100i": "b",
    "200": "c",
    "200i": "d",
    "300": "e",
    "300i": "f",
    "400": "g",
    "400i": "h",
    "500": "i",
    "500i": "j",
    "600": "k",
    "600i": "l",
    "700": "m",
    "700i": "n",
    "800": "o",
    "800i": "p",
    "900": "q",
    "900i": "r",
}
def get_gf_family(family, api_key):
    """Return the Google Fonts API record for *family*, or False if absent.

    Exits the program when the API key is rejected, the API reports any
    other error, or the response cannot be parsed.
    """
    request = requests.get(GF_API.format(api_key))
    try:
        response = json.loads(request.text)
        if "error" not in response:
            gf_families = response
        elif response["error"]["errors"][0]["reason"] == "keyInvalid":
            sys.exit(("The Google Fonts API key '{}'"
                      " was rejected as being invalid !").format(api_key))
        else:
            sys.exit(("There were errors in the"
                      " Google Fonts API request:"
                      " {}").format(response["error"]))
    except (ValueError, KeyError):
        sys.exit("Unable to load and parse data from Google Web Fonts API.")

    # linear scan over the catalogue; False signals "family not hosted"
    for record in gf_families['items']:
        if record['family'] == family:
            return record
    return False
def get_family_styles(gf_family):
    """Return the CSS weight tokens for every variant of *gf_family*.

    *gf_family* may be False (family not found), in which case the
    result is an empty list.
    """
    if not gf_family:
        return []
    return [GF_API_WEIGHT_TO_CSS_WEIGHT[variant] for variant in gf_family['variants']]
def get_family_subsets(family_subsets, gf_family):
    """Return the members of *family_subsets* that the family supports.

    A falsy *family_subsets* (None or empty) yields an empty list.
    """
    if not family_subsets:
        return []
    supported = gf_family['subsets']
    return [subset for subset in family_subsets if subset in supported]
def gen_head_webfonts(family, styles, subsets=None):
    """Generate the <link> tag that loads *family* from the Google Fonts CSS API.

    :param family: family name; spaces become '+' in the query string
    :param styles: list of CSS weight tokens, e.g. ["400", "700i"]
    :param subsets: optional list of subset names added as &subset=...
    :return: a single <link ... rel="stylesheet"> HTML string

    Bug fix: the original emitted a malformed attribute ('/rel="stylesheet"'
    with a stray solidus, and the closing quote misplaced); this version
    emits valid HTML with the same href.
    """
    query = "%s:%s" % (family.replace(' ', '+'), ','.join(styles))
    if subsets:
        query += "&subset=%s" % ','.join(subsets)
    return '<link href="https://fonts.googleapis.com/css?family=%s" rel="stylesheet">' % query
def gen_css_styles(family, styles):
    """Return one CSS rule per style, classed by its single-letter name.

    Italic tokens (trailing 'i') produce a rule with 'font-style: italic'
    and the numeric part of the token as the weight.
    """
    rules = []
    for style in styles:
        css_class = API_TO_CSS_STYLE_NAME[style]
        if style.endswith('i'):
            rules.append((".%s{font-family: '%s'; "
                          "font-weight:%s; "
                          "font-style: italic;}" % (css_class, family, style[:-1])))
        else:
            rules.append((".%s{font-family: '%s'; "
                          "font-weight:%s;}" % (css_class, family, style)))
    return rules
def gen_body_text(styles, sample_text):
    """Return one sample <p> paragraph per style, classed by its letter name."""
    return ["<p class='%s'>%s</p>" % (API_TO_CSS_STYLE_NAME[style], sample_text)
            for style in styles]
def main():
    """Parse the CLI arguments, query the GF API, and print the HTML snippet."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('key',
                        help='Key from Google Fonts Developer API')
    parser.add_argument('family',
                        help='family name on fonts.google.com')
    parser.add_argument('sample_text',
                        help='sample text used for each font')
    parser.add_argument('--subsets', nargs='+',
                        help='family subset(s) seperated by a space')
    args = parser.parse_args()

    gf_family = get_gf_family(args.family, args.key)
    family_styles = get_family_styles(gf_family)
    family_subsets = get_family_subsets(args.subsets, gf_family)

    # only pass subsets through when at least one requested subset is valid
    if family_subsets:
        head_fonts = gen_head_webfonts(args.family, family_styles, family_subsets)
    else:
        head_fonts = gen_head_webfonts(args.family, family_styles)

    css_styles = gen_css_styles(args.family, family_styles)
    body_text = gen_body_text(family_styles, args.sample_text)

    html = """
<html>
  <head>
    %s
    <style>
      %s
    </style>
  </head>
  <body>
    %s
  </body>
</html>""" % (
        head_fonts,
        '\n      '.join(css_styles),
        '\n    '.join(body_text)
    )
    print(html)


if __name__ == '__main__':
    main()
| googlefonts/gftools | bin/gftools-family-html-snippet.py | Python | apache-2.0 | 6,090 |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from numpy.testing import assert_allclose
import pytest
import jax
from jax import jit, random, value_and_grad
import jax.numpy as jnp
from jax.test_util import check_close
from numpyro.util import _versiontuple
if _versiontuple(jax.__version__) >= (0, 2, 25):
from jax.example_libraries import optimizers
else:
from jax.experimental import optimizers
import numpyro
from numpyro import optim
import numpyro.distributions as dist
from numpyro.distributions import constraints
from numpyro.distributions.transforms import AffineTransform, SigmoidTransform
from numpyro.handlers import substitute
from numpyro.infer import (
SVI,
RenyiELBO,
Trace_ELBO,
TraceGraph_ELBO,
TraceMeanField_ELBO,
)
from numpyro.primitives import mutable as numpyro_mutable
from numpyro.util import fori_loop
@pytest.mark.parametrize("alpha", [0.0, 2.0])
def test_renyi_elbo(alpha):
    """RenyiELBO should match Trace_ELBO in value and gradient on a model
    with no latent sites (both reduce to the observation log density)."""

    def model(x):
        numpyro.sample("obs", dist.Normal(0, 1), obs=x)

    def guide(x):
        pass

    def elbo_loss_fn(x):
        return Trace_ELBO().loss(random.PRNGKey(0), {}, model, guide, x)

    def renyi_loss_fn(x):
        return RenyiELBO(alpha=alpha, num_particles=10).loss(
            random.PRNGKey(0), {}, model, guide, x
        )

    # compare both the loss values and their gradients w.r.t. the observation
    elbo_loss, elbo_grad = value_and_grad(elbo_loss_fn)(2.0)
    renyi_loss, renyi_grad = value_and_grad(renyi_loss_fn)(2.0)
    assert_allclose(elbo_loss, renyi_loss, rtol=1e-6)
    assert_allclose(elbo_grad, renyi_grad, rtol=1e-6)
@pytest.mark.parametrize("elbo", [Trace_ELBO(), RenyiELBO(num_particles=10)])
@pytest.mark.parametrize("optimizer", [optim.Adam(0.05), optimizers.adam(0.05)])
def test_beta_bernoulli(elbo, optimizer):
    """SVI on a conjugate beta-bernoulli model should recover a posterior
    mean close to the analytic one, for both numpyro and jax optimizers."""
    data = jnp.array([1.0] * 8 + [0.0] * 2)

    def model(data):
        f = numpyro.sample("beta", dist.Beta(1.0, 1.0))
        with numpyro.plate("N", len(data)):
            numpyro.sample("obs", dist.Bernoulli(f), obs=data)

    def guide(data):
        alpha_q = numpyro.param("alpha_q", 1.0, constraint=constraints.positive)
        beta_q = numpyro.param("beta_q", 1.0, constraint=constraints.positive)
        numpyro.sample("beta", dist.Beta(alpha_q, beta_q))

    svi = SVI(model, guide, optimizer, elbo)
    svi_state = svi.init(random.PRNGKey(1), data)
    # optimizer state stores the unconstrained value: init 1.0 under the
    # positive constraint maps to 0.0 (presumably a log transform - per this assert)
    assert_allclose(svi.optim.get_params(svi_state.optim_state)["alpha_q"], 0.0)

    def body_fn(i, val):
        svi_state, _ = svi.update(val, data)
        return svi_state

    svi_state = fori_loop(0, 2000, body_fn, svi_state)
    params = svi.get_params(svi_state)
    # learned guide mean alpha/(alpha+beta) should be near the posterior mean
    assert_allclose(
        params["alpha_q"] / (params["alpha_q"] + params["beta_q"]),
        0.8,
        atol=0.05,
        rtol=0.05,
    )
@pytest.mark.parametrize("progress_bar", [True, False])
def test_run(progress_bar):
    """SVI.run returns per-step losses and converged params, with and without
    the progress bar, when params are initialized from callables."""
    data = jnp.array([1.0] * 8 + [0.0] * 2)

    def model(data):
        f = numpyro.sample("beta", dist.Beta(1.0, 1.0))
        with numpyro.plate("N", len(data)):
            numpyro.sample("obs", dist.Bernoulli(f), obs=data)

    def guide(data):
        # param init values given as key -> value callables rather than constants
        alpha_q = numpyro.param(
            "alpha_q", lambda key: random.normal(key), constraint=constraints.positive
        )
        beta_q = numpyro.param(
            "beta_q",
            lambda key: random.exponential(key),
            constraint=constraints.positive,
        )
        numpyro.sample("beta", dist.Beta(alpha_q, beta_q))

    svi = SVI(model, guide, optim.Adam(0.05), Trace_ELBO())
    svi_result = svi.run(random.PRNGKey(1), 1000, data, progress_bar=progress_bar)
    params, losses = svi_result.params, svi_result.losses
    # one loss per optimization step
    assert losses.shape == (1000,)
    assert_allclose(
        params["alpha_q"] / (params["alpha_q"] + params["beta_q"]),
        0.8,
        atol=0.05,
        rtol=0.05,
    )
def test_jitted_update_fn():
    """svi.update must produce the same params whether or not it is jit-compiled."""
    data = jnp.array([1.0] * 8 + [0.0] * 2)

    def model(data):
        f = numpyro.sample("beta", dist.Beta(1.0, 1.0))
        with numpyro.plate("N", len(data)):
            numpyro.sample("obs", dist.Bernoulli(f), obs=data)

    def guide(data):
        alpha_q = numpyro.param("alpha_q", 1.0, constraint=constraints.positive)
        beta_q = numpyro.param("beta_q", 1.0, constraint=constraints.positive)
        numpyro.sample("beta", dist.Beta(alpha_q, beta_q))

    adam = optim.Adam(0.05)
    svi = SVI(model, guide, adam, Trace_ELBO())
    svi_state = svi.init(random.PRNGKey(1), data)
    # one eager step vs one jitted step from the same state
    expected = svi.get_params(svi.update(svi_state, data)[0])
    actual = svi.get_params(jit(svi.update)(svi_state, data=data)[0])
    check_close(actual, expected, atol=1e-5)
def test_param():
    # this test the validity of model/guide sites having
    # param constraints contain composed transformed
    rng_keys = random.split(random.PRNGKey(0), 5)
    a_minval = 1
    c_minval = -2
    c_maxval = -1
    # init values chosen to satisfy each parameter's constraint
    a_init = jnp.exp(random.normal(rng_keys[0])) + a_minval
    b_init = jnp.exp(random.normal(rng_keys[1]))
    c_init = random.uniform(rng_keys[2], minval=c_minval, maxval=c_maxval)
    d_init = random.uniform(rng_keys[3])
    obs = random.normal(rng_keys[4])

    def model():
        # greater_than / positive constraints exercise composed transforms
        a = numpyro.param("a", a_init, constraint=constraints.greater_than(a_minval))
        b = numpyro.param("b", b_init, constraint=constraints.positive)
        numpyro.sample("x", dist.Normal(a, b), obs=obs)

    def guide():
        c = numpyro.param(
            "c", c_init, constraint=constraints.interval(c_minval, c_maxval)
        )
        d = numpyro.param("d", d_init, constraint=constraints.unit_interval)
        numpyro.sample("y", dist.Normal(c, d), obs=obs)

    adam = optim.Adam(0.01)
    svi = SVI(model, guide, adam, Trace_ELBO())
    svi_state = svi.init(random.PRNGKey(0))

    # init must round-trip through the constraint transforms unchanged
    params = svi.get_params(svi_state)
    assert_allclose(params["a"], a_init)
    assert_allclose(params["b"], b_init)
    assert_allclose(params["c"], c_init)
    assert_allclose(params["d"], d_init)

    actual_loss = svi.evaluate(svi_state)
    assert jnp.isfinite(actual_loss)
    # everything is observed, so the ELBO is available in closed form
    expected_loss = dist.Normal(c_init, d_init).log_prob(obs) - dist.Normal(
        a_init, b_init
    ).log_prob(obs)
    # not so precisely because we do transform / inverse transform stuffs
    assert_allclose(actual_loss, expected_loss, rtol=1e-6)
def test_elbo_dynamic_support():
    """The ELBO must handle a prior whose support is defined through a chain
    of transforms while the guide uses a plain bounded distribution."""
    x_prior = dist.TransformedDistribution(
        dist.Normal(),
        [AffineTransform(0, 2), SigmoidTransform(), AffineTransform(0, 3)],
    )
    x_guide = dist.Uniform(0, 3)

    def model():
        numpyro.sample("x", x_prior)

    def guide():
        numpyro.sample("x", x_guide)

    adam = optim.Adam(0.01)
    x = 2.0
    # pin the latent value so the ELBO is deterministic and checkable
    guide = substitute(guide, data={"x": x})
    svi = SVI(model, guide, adam, Trace_ELBO())
    svi_state = svi.init(random.PRNGKey(0))
    actual_loss = svi.evaluate(svi_state)
    assert jnp.isfinite(actual_loss)
    # with x fixed, loss = log q(x) - log p(x) exactly
    expected_loss = x_guide.log_prob(x) - x_prior.log_prob(x)
    assert_allclose(actual_loss, expected_loss)
@pytest.mark.parametrize("num_steps", [10, 30, 50])
def test_run_with_small_num_steps(num_steps):
    """SVI.run should complete without error for small step counts."""

    def empty_model():
        pass

    def empty_guide():
        pass

    svi = SVI(empty_model, empty_guide, optim.Adam(1), Trace_ELBO())
    svi.run(random.PRNGKey(0), num_steps)
@pytest.mark.parametrize("stable_run", [True, False])
def test_stable_run(stable_run):
    """With stable_update=True the final params stay finite; without it this
    setup diverges (as the assertion below pins down)."""

    def model():
        var = numpyro.sample("var", dist.Exponential(1))
        numpyro.sample("obs", dist.Normal(0, jnp.sqrt(var)), obs=0.0)

    def guide():
        loc = numpyro.param("loc", 0.0)
        numpyro.sample("var", dist.Normal(loc, 10))

    # learning rate 1 is deliberately large to trigger instability
    svi = SVI(model, guide, optim.Adam(1), Trace_ELBO())
    svi_result = svi.run(random.PRNGKey(0), 1000, stable_update=stable_run)
    assert jnp.isfinite(svi_result.params["loc"]) == stable_run
def test_svi_discrete_latent():
    """Only TraceGraph_ELBO advertises discrete-latent support; the other
    losses must flag it and warn when run on a discrete-latent model."""
    cont_inf_only_cls = [RenyiELBO(), Trace_ELBO(), TraceMeanField_ELBO()]
    mixed_inf_cls = [TraceGraph_ELBO()]

    assert not any([c.can_infer_discrete for c in cont_inf_only_cls])
    assert all([c.can_infer_discrete for c in mixed_inf_cls])

    def model():
        numpyro.sample("x", dist.Bernoulli(0.5))

    def guide():
        probs = numpyro.param("probs", 0.2)
        numpyro.sample("x", dist.Bernoulli(probs))

    # each continuous-only loss must emit the documented UserWarning
    for elbo in cont_inf_only_cls:
        svi = SVI(model, guide, optim.Adam(1), elbo)
        s_name = type(elbo).__name__
        w_msg = f"Currently, SVI with {s_name} loss does not support models with discrete latent variables"
        with pytest.warns(UserWarning, match=w_msg):
            svi.run(random.PRNGKey(0), 10)
@pytest.mark.parametrize("stable_update", [True, False])
@pytest.mark.parametrize("num_particles", [1, 10])
@pytest.mark.parametrize("elbo", [Trace_ELBO, TraceMeanField_ELBO])
def test_mutable_state(stable_update, num_particles, elbo):
    """Mutable sites are tracked through an SVI run with a single particle,
    and rejected (ValueError) when num_particles > 1."""

    def model():
        x = numpyro.sample("x", dist.Normal(-1, 1))
        numpyro_mutable("x1p", x + 1)

    def guide():
        loc = numpyro.param("loc", 0.0)
        p = numpyro_mutable("loc1p", {"value": None})
        # we can modify the content of `p` if it is a dict
        p["value"] = loc + 2
        numpyro.sample("x", dist.Normal(loc, 0.1))

    svi = SVI(model, guide, optim.Adam(0.1), elbo(num_particles=num_particles))
    if num_particles > 1:
        # multiple particles with mutable state is unsupported
        with pytest.raises(ValueError, match="mutable state"):
            svi_result = svi.run(random.PRNGKey(0), 1000, stable_update=stable_update)
        return
    svi_result = svi.run(random.PRNGKey(0), 1000, stable_update=stable_update)
    params = svi_result.params
    mutable_state = svi_result.state.mutable_state
    assert set(mutable_state) == {"x1p", "loc1p"}
    # the guide-side mutable dict tracks the optimized loc
    assert_allclose(mutable_state["loc1p"]["value"], params["loc"] + 2, atol=0.1)
    # here, the initial loc has value 0., hence x1p will have init value near 1
    # it won't be updated during SVI run because it is not a mutable state
    assert_allclose(mutable_state["x1p"], 1.0, atol=0.2)
def test_tracegraph_normal_normal():
    """TraceGraph_ELBO on a conjugate normal-normal model with a
    non-reparameterized latent should recover the analytic posterior."""
    # normal-normal; known covariance
    lam0 = jnp.array([0.1, 0.1])  # precision of prior
    loc0 = jnp.array([0.0, 0.5])  # prior mean
    # known precision of observation noise
    lam = jnp.array([6.0, 4.0])
    data = []
    data.append(jnp.array([-0.1, 0.3]))
    data.append(jnp.array([0.0, 0.4]))
    data.append(jnp.array([0.2, 0.5]))
    data.append(jnp.array([0.1, 0.7]))
    n_data = len(data)
    sum_data = data[0] + data[1] + data[2] + data[3]
    # closed-form posterior precision / scale / mean for the conjugate model
    analytic_lam_n = lam0 + n_data * lam
    analytic_log_sig_n = -0.5 * jnp.log(analytic_lam_n)
    analytic_loc_n = sum_data * (lam / analytic_lam_n) + loc0 * (lam0 / analytic_lam_n)

    class FakeNormal(dist.Normal):
        # empty reparametrized_params disables the reparameterized sampler,
        # exercising TraceGraph_ELBO's non-reparameterized gradient path
        reparametrized_params = []

    def model():
        with numpyro.plate("plate", 2):
            loc_latent = numpyro.sample(
                "loc_latent", FakeNormal(loc0, jnp.power(lam0, -0.5))
            )
            for i, x in enumerate(data):
                numpyro.sample(
                    "obs_{}".format(i),
                    dist.Normal(loc_latent, jnp.power(lam, -0.5)),
                    obs=x,
                )
        return loc_latent

    def guide():
        # init offsets from the analytic optimum give SVI something to learn
        loc_q = numpyro.param("loc_q", analytic_loc_n + jnp.array([0.334, 0.334]))
        log_sig_q = numpyro.param(
            "log_sig_q", analytic_log_sig_n + jnp.array([-0.29, -0.29])
        )
        sig_q = jnp.exp(log_sig_q)
        with numpyro.plate("plate", 2):
            loc_latent = numpyro.sample("loc_latent", FakeNormal(loc_q, sig_q))
        return loc_latent

    adam = optim.Adam(step_size=0.0015, b1=0.97, b2=0.999)
    svi = SVI(model, guide, adam, loss=TraceGraph_ELBO())
    svi_result = svi.run(jax.random.PRNGKey(0), 5000)

    # squared error between learned params and the analytic posterior
    loc_error = jnp.sum(jnp.power(analytic_loc_n - svi_result.params["loc_q"], 2.0))
    log_sig_error = jnp.sum(
        jnp.power(analytic_log_sig_n - svi_result.params["log_sig_q"], 2.0)
    )
    assert_allclose(loc_error, 0, atol=0.05)
    assert_allclose(log_sig_error, 0, atol=0.05)
def test_tracegraph_beta_bernoulli():
    """TraceGraph_ELBO on a conjugate beta-bernoulli model with a
    non-reparameterized latent should recover the analytic posterior."""
    # bernoulli-beta model
    # beta prior hyperparameter
    alpha0 = 1.0
    beta0 = 1.0  # beta prior hyperparameter
    data = jnp.array([0.0, 1.0, 1.0, 1.0])
    n_data = float(len(data))
    data_sum = data.sum()
    # conjugate posterior parameters in closed form
    alpha_n = alpha0 + data_sum  # posterior alpha
    beta_n = beta0 - data_sum + n_data  # posterior beta
    log_alpha_n = jnp.log(alpha_n)
    log_beta_n = jnp.log(beta_n)

    class FakeBeta(dist.Beta):
        # disables reparameterized sampling to hit the score-function estimator
        reparametrized_params = []

    def model():
        p_latent = numpyro.sample("p_latent", FakeBeta(alpha0, beta0))
        with numpyro.plate("data", len(data)):
            numpyro.sample("obs", dist.Bernoulli(p_latent), obs=data)
        return p_latent

    def guide():
        # parameters kept in log space; init offset from the analytic optimum
        alpha_q_log = numpyro.param("alpha_q_log", log_alpha_n + 0.17)
        beta_q_log = numpyro.param("beta_q_log", log_beta_n - 0.143)
        alpha_q, beta_q = jnp.exp(alpha_q_log), jnp.exp(beta_q_log)
        p_latent = numpyro.sample("p_latent", FakeBeta(alpha_q, beta_q))
        with numpyro.plate("data", len(data)):
            pass
        return p_latent

    adam = optim.Adam(step_size=0.0007, b1=0.95, b2=0.999)
    svi = SVI(model, guide, adam, loss=TraceGraph_ELBO())
    svi_result = svi.run(jax.random.PRNGKey(0), 3000)

    alpha_error = jnp.sum(
        jnp.power(log_alpha_n - svi_result.params["alpha_q_log"], 2.0)
    )
    beta_error = jnp.sum(jnp.power(log_beta_n - svi_result.params["beta_q_log"], 2.0))
    assert_allclose(alpha_error, 0, atol=0.03)
    assert_allclose(beta_error, 0, atol=0.04)
def test_tracegraph_gamma_exponential():
    """TraceGraph_ELBO on a conjugate gamma-exponential model with a
    non-reparameterized latent should recover the analytic posterior."""
    # exponential-gamma model
    # gamma prior hyperparameter
    alpha0 = 1.0
    # gamma prior hyperparameter
    beta0 = 1.0
    n_data = 2
    data = jnp.array([3.0, 2.0])  # two observations
    # conjugate posterior parameters in closed form
    alpha_n = alpha0 + n_data  # posterior alpha
    beta_n = beta0 + data.sum()  # posterior beta
    log_alpha_n = jnp.log(alpha_n)
    log_beta_n = jnp.log(beta_n)

    class FakeGamma(dist.Gamma):
        # disables reparameterized sampling to hit the score-function estimator
        reparametrized_params = []

    def model():
        lambda_latent = numpyro.sample("lambda_latent", FakeGamma(alpha0, beta0))
        with numpyro.plate("data", len(data)):
            numpyro.sample("obs", dist.Exponential(lambda_latent), obs=data)
        return lambda_latent

    def guide():
        # parameters kept in log space; init offset from the analytic optimum
        alpha_q_log = numpyro.param("alpha_q_log", log_alpha_n + 0.17)
        beta_q_log = numpyro.param("beta_q_log", log_beta_n - 0.143)
        alpha_q, beta_q = jnp.exp(alpha_q_log), jnp.exp(beta_q_log)
        numpyro.sample("lambda_latent", FakeGamma(alpha_q, beta_q))
        with numpyro.plate("data", len(data)):
            pass

    adam = optim.Adam(step_size=0.0007, b1=0.95, b2=0.999)
    svi = SVI(model, guide, adam, loss=TraceGraph_ELBO())
    svi_result = svi.run(jax.random.PRNGKey(0), 8000)

    alpha_error = jnp.sum(
        jnp.power(log_alpha_n - svi_result.params["alpha_q_log"], 2.0)
    )
    beta_error = jnp.sum(jnp.power(log_beta_n - svi_result.params["beta_q_log"], 2.0))
    assert_allclose(alpha_error, 0, atol=0.04)
    assert_allclose(beta_error, 0, atol=0.04)
@pytest.mark.parametrize(
    "num_latents,num_steps,step_size,atol,difficulty",
    [
        (3, 5000, 0.003, 0.05, 0.6),
        (5, 6000, 0.003, 0.05, 0.6),
        (7, 8000, 0.003, 0.05, 0.6),
    ],
)
def test_tracegraph_gaussian_chain(num_latents, num_steps, step_size, atol, difficulty):
    """TraceGraph_ELBO on a chain of N gaussian latents, with a random subset
    of latents using non-reparameterized sampling, should recover the
    analytically derived posterior parameters."""
    loc0 = 0.2
    data = jnp.array([-0.1, 0.03, 0.2, 0.1])
    n_data = data.shape[0]
    sum_data = data.sum()
    N = num_latents
    # precisions of the chain; wrapped in length-1 arrays
    lambdas = [1.5 * (k + 1) / N for k in range(N + 1)]
    lambdas = list(map(lambda x: jnp.array([x]), lambdas))
    # recursively computed "collapsed" precisions down the chain
    lambda_tilde_posts = [lambdas[0]]
    for k in range(1, N):
        lambda_tilde_k = (lambdas[k] * lambda_tilde_posts[k - 1]) / (
            lambdas[k] + lambda_tilde_posts[k - 1]
        )
        lambda_tilde_posts.append(lambda_tilde_k)
    # posterior precision per latent (index 0 unused to shift indexing by 1)
    lambda_posts = [
        None
    ]  # this is never used (just a way of shifting the indexing by 1)
    for k in range(1, N):
        lambda_k = lambdas[k] + lambda_tilde_posts[k - 1]
        lambda_posts.append(lambda_k)
    lambda_N_post = (n_data * lambdas[N]) + lambda_tilde_posts[N - 1]
    lambda_posts.append(lambda_N_post)
    # analytic targets for the guide's kappa (coupling) and mu parameters
    target_kappas = [None]
    target_kappas.extend([lambdas[k] / lambda_posts[k] for k in range(1, N)])
    target_mus = [None]
    target_mus.extend(
        [loc0 * lambda_tilde_posts[k - 1] / lambda_posts[k] for k in range(1, N)]
    )
    target_loc_N = (
        sum_data * lambdas[N] / lambda_N_post
        + loc0 * lambda_tilde_posts[N - 1] / lambda_N_post
    )
    target_mus.append(target_loc_N)
    np.random.seed(0)
    # pick a sparse random subset of nodes to use reparameterized sampling
    while True:
        mask = np.random.binomial(1, 0.3, (N,))
        if mask.sum() < 0.4 * N and mask.sum() > 0.5:
            which_nodes_reparam = mask
            break

    class FakeNormal(dist.Normal):
        # disables reparameterized sampling for the flagged nodes
        reparametrized_params = []

    def model(difficulty=0.0):
        # markov chain of N latents; data observed at the end of the chain
        next_mean = loc0
        for k in range(1, N + 1):
            latent_dist = dist.Normal(next_mean, jnp.power(lambdas[k - 1], -0.5))
            loc_latent = numpyro.sample("loc_latent_{}".format(k), latent_dist)
            next_mean = loc_latent
        loc_N = next_mean
        with numpyro.plate("data", data.shape[0]):
            numpyro.sample(
                "obs", dist.Normal(loc_N, jnp.power(lambdas[N], -0.5)), obs=data
            )
        return loc_N

    def guide(difficulty=0.0):
        # walk the chain backwards; each latent's mean couples to the previous
        # sample through kappa_q (except the last latent). `difficulty`
        # perturbs the init away from the analytic optimum.
        previous_sample = None
        for k in reversed(range(1, N + 1)):
            loc_q = numpyro.param(
                f"loc_q_{k}",
                lambda key: target_mus[k]
                + difficulty * (0.1 * random.normal(key) - 0.53),
            )
            log_sig_q = numpyro.param(
                f"log_sig_q_{k}",
                lambda key: -0.5 * jnp.log(lambda_posts[k])
                + difficulty * (0.1 * random.normal(key) - 0.53),
            )
            sig_q = jnp.exp(log_sig_q)
            kappa_q = None
            if k != N:
                kappa_q = numpyro.param(
                    "kappa_q_%d" % k,
                    lambda key: target_kappas[k]
                    + difficulty * (0.1 * random.normal(key) - 0.53),
                )
            mean_function = loc_q if k == N else kappa_q * previous_sample + loc_q
            node_flagged = True if which_nodes_reparam[k - 1] == 1.0 else False
            Normal = dist.Normal if node_flagged else FakeNormal
            loc_latent = numpyro.sample(f"loc_latent_{k}", Normal(mean_function, sig_q))
            previous_sample = loc_latent
        return previous_sample

    adam = optim.Adam(step_size=step_size, b1=0.95, b2=0.999)
    svi = SVI(model, guide, adam, loss=TraceGraph_ELBO())
    svi_result = svi.run(jax.random.PRNGKey(0), num_steps, difficulty=difficulty)

    # accumulate squared errors of every learned parameter vs its target
    kappa_errors, log_sig_errors, loc_errors = [], [], []
    for k in range(1, N + 1):
        if k != N:
            kappa_error = jnp.sum(
                jnp.power(svi_result.params[f"kappa_q_{k}"] - target_kappas[k], 2)
            )
            kappa_errors.append(kappa_error)
        loc_errors.append(
            jnp.sum(jnp.power(svi_result.params[f"loc_q_{k}"] - target_mus[k], 2))
        )
        log_sig_error = jnp.sum(
            jnp.power(
                svi_result.params[f"log_sig_q_{k}"] + 0.5 * jnp.log(lambda_posts[k]), 2
            )
        )
        log_sig_errors.append(log_sig_error)

    max_errors = (np.max(loc_errors), np.max(log_sig_errors), np.max(kappa_errors))
    for i in range(3):
        assert_allclose(max_errors[i], 0, atol=atol)
| pyro-ppl/numpyro | test/infer/test_svi.py | Python | apache-2.0 | 19,631 |
#!/usr/bin/env python
# coding: utf8
#
"""
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/_my_article_python-explorations/git_repo_python_explorations_nlp/article_6_chatbot_with_pytorch
python 00_train.py
"""
# VALUE: input intents definition and output checkpoint path for the trained model.
JSONFILE="intents-backoffice-1.json"
FILE = "data-backoffice-2.pth"
import numpy as np
import random
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# from nltk_utils import bag_of_words, tokenize, stem
# from model import NeuralNet
from asset_nltk_utils import bag_of_words, tokenize, stem
from asset_model import NeuralNet
# ---- Load the intents file and build the training vocabulary ----
with open(JSONFILE, 'r') as f:
    intents = json.load(f)
all_words = []   # every token seen across all patterns (pre-dedup)
tags = []        # one tag per intent
xy = []          # (token_list, tag) training pairs
# loop through each sentence in our intents patterns
for intent in intents['intents']:
    tag = intent['tag']
    # add to tag list
    tags.append(tag)
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = tokenize(pattern)
        # add to our words list
        all_words.extend(w)
        # add to xy pair
        xy.append((w, tag))
# stem and lower each word
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    # X: bag of words for each pattern_sentence
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)
    # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
    label = tags.index(tag)
    y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
# input width = vocabulary size (length of one bag-of-words vector)
input_size = len(X_train[0])
hidden_size = 8
# one output unit per tag/class
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
    """Thin Dataset wrapper over the module-level training arrays.

    Exposes (bag-of-words vector, class label) pairs so a DataLoader can
    batch and shuffle them.
    """

    def __init__(self):
        # Keep references to the arrays built above.
        self._features = X_train
        self._labels = y_train
        self.n_samples = len(X_train)

    def __getitem__(self, index):
        # dataset[i] -> (feature vector, label) for the i-th sample
        return self._features[index], self._labels[index]

    def __len__(self):
        # len(dataset) -> number of training samples
        return self.n_samples
# ---- Train the classifier and persist the model + vocabulary ----
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(words)
        # if y would be one-hot, we must apply
        # labels = torch.max(labels, 1)[1]
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Log progress every 100 epochs.
    # NOTE(review): `loss` here is from the last batch only, and assumes the
    # loader produced at least one batch -- confirm for tiny datasets.
    if (epoch+1) % 100 == 0:
        print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
# Bundle the weights with everything inference needs (vocabulary, tags,
# layer sizes) so the chat script can rebuild the model from one file.
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
| bflaven/BlogArticlesExamples | python_nlp_explorations_chatbot_keywords_extraction/article_6_chatbot_with_pytorch/00_train.py | Python | mit | 3,740 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level conversion support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.converters import asserts
from tensorflow.contrib.autograph.converters import break_statements
from tensorflow.contrib.autograph.converters import builtin_functions
from tensorflow.contrib.autograph.converters import call_trees
from tensorflow.contrib.autograph.converters import continue_statements
from tensorflow.contrib.autograph.converters import control_flow
from tensorflow.contrib.autograph.converters import decorators
from tensorflow.contrib.autograph.converters import ifexp
from tensorflow.contrib.autograph.converters import lists
from tensorflow.contrib.autograph.converters import logical_expressions
from tensorflow.contrib.autograph.converters import name_scopes
from tensorflow.contrib.autograph.converters import side_effect_guards
from tensorflow.contrib.autograph.converters import single_return
from tensorflow.contrib.autograph.impl import config
from tensorflow.contrib.autograph.impl import naming
from tensorflow.contrib.autograph.pyct import context
from tensorflow.contrib.autograph.pyct import inspect_utils
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.contrib.autograph.utils import type_hints
from tensorflow.python.util import tf_inspect
# TODO(mdan): Might we not need any renaming at all?
class ConversionMap(object):
  """ConversionMap keeps track of converting function hierarchies.

  This object is mutable, and is updated as functions are converted.

  Attributes:
    recursive: Whether to recursively convert any functions that the decorated
        function may call.
    nocompile_decorators: tuple of decorator functions that toggle compilation
        off.
    dependency_cache: dict[object]: ast; maps original entities to their
        converted AST
    additional_imports: set(object); additional entities which for any reason
        cannot be attached after loading and need to be explicitly imported
        in the generated code
    name_map: dict[string]: string; maps original entities to the name of
        their converted counterparts
    api_module: A reference to the api module. The reference needs to be passed
        to avoid circular dependencies.
  """

  # TODO(mdan): Rename to ConversionContext, and pull in additional flags.

  def __init__(self, recursive, nocompile_decorators, partial_types,
               api_module):
    self.recursive = recursive
    self.nocompile_decorators = nocompile_decorators
    # Normalize None/falsy to an empty tuple so membership tests always work.
    self.partial_types = partial_types if partial_types else ()
    self.dependency_cache = {}
    self.additional_imports = set()
    self.name_map = {}
    self.api_module = api_module

  def new_namer(self, namespace):
    """Creates a Namer scoped to `namespace` that shares this map's renames."""
    return naming.Namer(namespace, self.recursive, self.name_map,
                        self.partial_types)

  def update_name_map(self, namer):
    """Merges `namer.renamed_calls` into `name_map`, rejecting conflicts.

    Args:
      namer: An object with a `renamed_calls` dict attribute.
    Raises:
      ValueError: if an entity was already mapped to a different name.
    """
    for o, name in namer.renamed_calls.items():
      if o in self.name_map:
        if self.name_map[o] != name:
          # Bug fix: the '%s' placeholders were previously never substituted,
          # so the error message printed literal '%s' markers.
          raise ValueError(
              'Calls to %s were converted using multiple names (%s). This is '
              'possible when an entity with one of these names already '
              'existed. To fix, avoid using any of these names.' %
              (o, (name, self.name_map[o])))
      else:
        self.name_map[o] = name

  def add_to_cache(self, original_entity, converted_ast):
    """Records the converted AST for `original_entity`."""
    self.dependency_cache[original_entity] = converted_ast
def is_whitelisted_for_graph(o):
  """Check whether an entity is whitelisted for use in graph mode.

  Examples of whitelisted entities include all members of the tensorflow
  package.

  Args:
    o: A Python entity.
  Returns:
    Boolean
  """
  # NOTE(review): getmodule may return None for builtins/dynamic code, which
  # would make `m.__name__` raise -- confirm callers only pass module-backed
  # entities.
  m = tf_inspect.getmodule(o)
  # The trailing comma unpacks 1-tuples; presumably DEFAULT_UNCOMPILED_MODULES
  # holds (module_name,) tuples -- confirm against the config module.
  for prefix, in config.DEFAULT_UNCOMPILED_MODULES:
    if m.__name__.startswith(prefix):
      return True
  return False
def entity_to_graph(o, conversion_map, arg_values, arg_types):
  """Compile a Python entity into equivalent TensorFlow.

  The function will also recursively compile all the entities that `o`
  references, updating `dependency_cache`.

  This function is reentrant, and relies on dependency_cache to avoid
  generating duplicate code.

  Args:
    o: A Python entity.
    conversion_map: A ConversionMap object.
    arg_values: A dict containing value hints for symbols like function
        parameters.
    arg_types: A dict containing type hints for symbols like function
        parameters.

  Returns:
    A tuple (ast, new_name):
        * ast: An AST representing an entity with interface equivalent to `o`,
            but which when executed creates a TF graph.
        * new_name: The symbol name under which the new entity can be found.

  Raises:
    ValueError: if the entity type is not supported.
  """
  # Dispatch on the entity kind; functions and methods share the same path.
  if tf_inspect.isclass(o):
    node, new_name = class_to_graph(o, conversion_map)
  elif tf_inspect.isfunction(o):
    node, new_name = function_to_graph(o, conversion_map, arg_values, arg_types)
  elif tf_inspect.ismethod(o):
    node, new_name = function_to_graph(o, conversion_map, arg_values, arg_types)
  else:
    raise ValueError(
        'Entity "%s" has unsupported type "%s". Only functions and classes are '
        'supported for now.' % (o, type(o)))
  # Cache before recursing so reentrant calls see `o` as already converted.
  conversion_map.add_to_cache(o, node)
  if conversion_map.recursive:
    # Repeatedly pick any renamed dependency that has no cached AST yet.
    while True:
      candidate = None
      for obj in conversion_map.name_map.keys():
        if obj not in conversion_map.dependency_cache:
          candidate = obj
          break
      if candidate is None:
        break
      if (hasattr(candidate, 'im_class') and
          getattr(candidate, 'im_class') not in conversion_map.partial_types):
        # Class members are converted with their objects, unless they're
        # only converted partially.
        # NOTE(review): `continue` leaves `candidate` uncached, so the
        # `while True` loop picks it again on the next pass -- this looks
        # like a potential infinite loop; confirm against the callers.
        continue
      entity_to_graph(candidate, conversion_map, {}, {})
  return node, new_name
def class_to_graph(c, conversion_map):
  """Specialization of `entity_to_graph` for classes.

  Converts every function/method member of `c` and wraps the results in a
  new ClassDef AST node under a compiler-generated class name.

  Args:
    c: A Python class.
    conversion_map: A ConversionMap object.
  Returns:
    A tuple (node, class_name) of the generated gast.ClassDef and its name.
  Raises:
    ValueError: if the class has no convertible member methods.
  """
  converted_members = {}
  method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
  members = tf_inspect.getmembers(c, predicate=method_filter)
  if not members:
    raise ValueError('Cannot convert %s: it has no member methods.' % c)
  class_namespace = None
  for _, m in members:
    # `self` is type-hinted to the owning class for all members.
    node, _ = function_to_graph(
        m,
        conversion_map=conversion_map,
        arg_values={},
        arg_types={'self': (c.__name__, c)},
        owner_type=c)
    # TODO(mdan): Do not assume all members have the same view of globals.
    if class_namespace is None:
      class_namespace = inspect_utils.getnamespace(m)
    converted_members[m] = node
  namer = conversion_map.new_namer(class_namespace)
  class_name = namer.compiled_class_name(c.__name__, c)
  # The generated class drops bases/decorators; only the converted method
  # bodies are carried over.
  node = gast.ClassDef(
      class_name,
      bases=[],
      keywords=[],
      body=list(converted_members.values()),
      decorator_list=[])
  return node, class_name
def _add_self_references(namespace, api_module):
  """Self refs are only required for analysis and are not used directly.

  Injects the `autograph_utils` and `autograph_api` names that generated code
  may reference into `namespace`, refusing to clobber conflicting bindings.

  Args:
    namespace: dict; namespace of the entity being converted.
    api_module: module object; the autograph api module (passed in to avoid
        a circular import).
  Raises:
    ValueError: if either reserved name is already bound to something else.
  """
  # Manually add the utils namespace which may be used from generated code.
  # Bug fix: the membership test previously checked the misspelled key
  # 'autograph_util' (missing 's'), so a conflicting existing
  # 'autograph_utils' binding was silently overwritten instead of raising.
  if 'autograph_utils' not in namespace:
    namespace['autograph_utils'] = utils
  elif namespace['autograph_utils'] != utils:
    raise ValueError(
        'The module name "autograph_utils" is reserved and may not be used.')
  # We also make reference to the api module for dynamic conversion, but
  # to avoid circular references we don't import it here.
  if 'autograph_api' not in namespace:
    namespace['autograph_api'] = api_module
  elif namespace['autograph_api'] != api_module:
    raise ValueError(
        'The module name "autograph_api" is reserved and may not be used.')
def function_to_graph(f, conversion_map, arg_values, arg_types,
                      owner_type=None):
  """Specialization of `entity_to_graph` for callable functions.

  Parses `f`, runs the full conversion pipeline over its AST, and renames
  the result to a compiler-generated symbol.

  Args:
    f: A function or method.
    conversion_map: A ConversionMap object.
    arg_values: dict of value hints for parameters.
    arg_types: dict of type hints for parameters.
    owner_type: the class owning `f`, when converting a method.
  Returns:
    A tuple (node, new_name) of the converted AST and its new symbol name.
  """
  node, source = parser.parse_entity(f)
  # parse_entity returns a Module; the function def is its single statement.
  node = node.body[0]
  namespace = inspect_utils.getnamespace(f)
  _add_self_references(namespace, conversion_map.api_module)
  namer = conversion_map.new_namer(namespace)
  ctx = context.EntityContext(
      namer=namer,
      source_code=source,
      source_file='<fragment>',
      namespace=namespace,
      arg_values=arg_values,
      arg_types=arg_types,
      owner_type=owner_type,
      recursive=conversion_map.recursive,
      type_annotation_func=type_hints.set_element_type)
  node, deps = node_to_graph(node, ctx, conversion_map.nocompile_decorators)
  # TODO(mdan): This somewhat duplicates the call rename logic in call_treest.py
  new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
  if not did_rename:
    # Keep the original name; the converted AST is expected to match it.
    new_name = f.__name__
    if node.name != f.__name__:
      raise NotImplementedError('Strange corner case. Send us offending code!')
  node.name = new_name
  conversion_map.update_name_map(namer)
  # TODO(mdan): Use this at compilation.
  conversion_map.additional_imports.update(deps)
  return node, new_name
def _static_analysis_pass(node, ctx):
  """Runs the static-analysis pipeline over `node` in its required order.

  Qualified-name resolution must come first; activity, live-value and type
  analyses each build on the annotations left by the previous pass.
  """
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  node = live_values.resolve(node, ctx, config.PYTHON_LITERALS)
  node = type_info.resolve(node, ctx)
  return node
def node_to_graph(node, ctx, nocompile_decorators):
  """Convert Python code to equivalent TF graph mode code.

  Args:
    node: A Python AST node representing the code to convert.
    ctx: An EntityContext object.
    nocompile_decorators: A tuple containing decorators to be stripped from
        functions during conversion.

  Returns:
    A tuple (node, deps):
        * node: A Python ast node, representing the converted code.
        * deps: A set of strings, the fully qualified names of entity
            dependencies that this node has.
  """
  # TODO(mdan): Verify arguments for correctness.
  # TODO(mdan): Factor out common elements.
  # These include:
  #   * code move between blocks
  #   * visiting blocks in transformers
  # Certain steps, especially canonicalization, insert new symbols into the
  # tree, which must be accounted. Although less efficient, it is most robust
  # to re-run the analysis.
  node = _static_analysis_pass(node, ctx)
  # TODO(mdan): Clean this up.
  # Some intermediate analyses are not required, and some comments got orphaned.
  # Past this point, line numbers are no longer accurate so we ignore the
  # source.
  # TODO(mdan): Is it feasible to reconstruct intermediate source code?
  ctx.source_code = None
  node = ifexp.transform(node, ctx)
  node, deps = decorators.transform(node, nocompile_decorators)
  node = break_statements.transform(node, ctx)
  node = asserts.transform(node, ctx)
  # Note: sequencing continue canonicalization before for loop one avoids
  # dealing with the extra loop increment operation that the for
  # canonicalization creates.
  node = continue_statements.transform(node, ctx)
  # NOTE(review): `len` is injected so later passes can resolve it as a live
  # value -- presumably for the list/builtin transforms below; confirm.
  ctx.namespace['len'] = len
  # Re-run analysis after each group of transforms that may introduce new
  # symbols or change scopes (see the comment above).
  node = _static_analysis_pass(node, ctx)
  node = single_return.transform(node, ctx)
  node = _static_analysis_pass(node, ctx)
  node = lists.transform(node, ctx)
  node = builtin_functions.transform(node, ctx)
  node = _static_analysis_pass(node, ctx)
  node = call_trees.transform(node, ctx, config.DEFAULT_UNCOMPILED_MODULES,
                              nocompile_decorators)
  node = control_flow.transform(node, ctx)
  # control_flow may create new symbols and change scopes.
  node = _static_analysis_pass(node, ctx)
  node = logical_expressions.transform(node, ctx)
  node = side_effect_guards.transform(node, ctx)
  node = name_scopes.transform(node, ctx)
  return node, deps
| allenlavoie/tensorflow | tensorflow/contrib/autograph/impl/conversion.py | Python | apache-2.0 | 12,675 |
# automate/server/user/views.py
#################
#### imports ####
#################
#from flask import render_template, Blueprint, url_for, \
# redirect, flash, request
#from flask_login import login_user, logout_user, login_required
#from automate.server import bcrypt, db
#from automate.server import db
#from automate.server.models import User
#from automate.server.user.forms import LoginForm, RegisterForm
################
#### config ####
################
#user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
#@user_blueprint.route('/register', methods=['GET', 'POST'])
#def register():
# form = RegisterForm(request.form)
# if form.validate_on_submit():
# user = User(
# email=form.email.data,
# password=form.password.data
# )
# db.session.add(user)
# db.session.commit()
#
# login_user(user)
#
# flash('Thank you for registering.', 'success')
# return redirect(url_for("user.members"))
#
# return render_template('user/register.html', form=form)
#
#
#@user_blueprint.route('/login', methods=['GET', 'POST'])
#def login():
# form = LoginForm(request.form)
# if form.validate_on_submit():
# user = User.query.filter_by(email=form.email.data).first()
# if user:
# #if user and bcrypt.check_password_hash(
# # user.password, request.form['password']):
# # login_user(user)
# flash('You are logged in. Welcome!', 'success')
# return redirect(url_for('user.members'))
# else:
# flash('Invalid email and/or password.', 'danger')
# return render_template('user/login.html', form=form)
# return render_template('user/login.html', title='Please Login', form=form)
#
#
#@user_blueprint.route('/logout')
#@login_required
#def logout():
# logout_user()
# flash('You were logged out. Bye!', 'success')
# return redirect(url_for('main.home'))
#
#
#@user_blueprint.route('/members')
#@login_required
#def members():
# return render_template('user/members.html')
# | JeromeErasmus/browserstack_automate | automate/server/user/views.py | Python | apache-2.0 | 2,112 |
# Simple wrapper script needed to run epydoc
# NOTE: this is Python 2 code (print >> stream syntax); it will not run
# under Python 3.
import sys
try:
    from epydoc.cli import cli
except ImportError:
    print>>sys.stderr, "No epydoc installed (see http://epydoc.sourceforge.net)"
    sys.exit(2)
# Epydoc 3.0.1 has some trouble running with recent Docutils (>= 0.6),
# so we work around this bug, following the lines of the fix in
# https://bugs.gentoo.org/attachment.cgi?id=210118
# (see http://bugs.gentoo.org/287546)
try:
    from docutils.nodes import Text
    # Older docutils exposed Text.data; newer versions dropped it, so
    # re-create it as a property delegating to astext().
    if not hasattr(Text, 'data'):
        setattr(Text, 'data', property(lambda self: self.astext()))
except ImportError:
    print>>sys.stderr, "docutils is needed for running epydoc " \
        "(see http://docutils.sourceforge.net)"
    sys.exit(2)
# Epydoc doesn't allow much control over the generated graphs. This is
# bad especially for the class graph for Component which has a lot of
# subclasses, so we need to force Left-to-Right mode.
# from epydoc.docwriter.html import HTMLWriter
# HTMLWriter_render_graph = HTMLWriter.render_graph
# def render_graph_LR(self, graph):
#     if graph:
#         graph.body += 'rankdir=LR\n'
#     return HTMLWriter_render_graph(self, graph)
# HTMLWriter.render_graph = render_graph_LR
# Well, LR mode doesn't really look better...
# the ASCII-art version seems better in most cases.
# Workaround "visiting unknown node type" error due to `.. note ::`
# This was due to the lack of Admonitions transforms. Add it.
from epydoc.markup.restructuredtext import _DocumentPseudoWriter
from docutils.transforms import writer_aux
# Monkey-patch the pseudo writer so the Admonitions transform is applied.
orig_get_transforms = _DocumentPseudoWriter.get_transforms
def pseudo_get_transforms(self):
    # Extend the original transform list rather than replacing it.
    return orig_get_transforms(self) + [writer_aux.Admonitions]
_DocumentPseudoWriter.get_transforms = pseudo_get_transforms
# Run epydoc
cli()
| dinhkhanh/trac | doc/utils/runepydoc.py | Python | bsd-3-clause | 1,792 |
#-*- coding:utf-8 -*-
from django.conf import settings
from django.db import models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from bookshare.apps.books.models import Book
class BookShareModel(models.Model):
    """Abstract base model adding a publication timestamp."""
    # NOTE(review): `default=timezone.now` is redundant alongside
    # `auto_now_add=True` (Django uses auto_now_add and ignores the default)
    # -- confirm intent.
    pub_date = models.DateTimeField(_('Published Date'), default=timezone.now, auto_now_add=True)

    class Meta:
        abstract = True
class ConditionMixin(models.Model):
    """Abstract mixin adding a physical-condition grade (A/B/C)."""
    class Meta:
        abstract = True

    A = "A"
    B = "B"
    C = "C"
    CONDITIONS = (
        (A, "A"),
        (B, "B"),
        (C, "C"),
    )
    # Single-letter grade describing how well the copy is preserved.
    condition = models.CharField(_(u'보관상태'), max_length=2, choices=CONDITIONS)
class StockManager(models.Manager):
    """Manager exposing status-filtered querysets for Stock."""
    # NOTE(review): `get_query_set` is the pre-Django-1.6 spelling; newer
    # Django renamed it to `get_queryset` -- confirm the project's version.
    def available(self, *args, **kwargs):
        # Copies currently on the shelf.
        qs = self.get_query_set().filter(*args, **kwargs)
        return qs.filter(status=Stock.AVAILABLE)

    def rented(self, *args, **kwargs):
        # Copies currently lent out.
        qs = self.get_query_set().filter(*args, **kwargs)
        return qs.filter(status=Stock.RENTED)
class Stock(ConditionMixin):
    """A single physical copy of a book, tracked through its rental lifecycle.

    Status moves between AVAILABLE (on shelf), RENTED (lent to `renter`)
    and RECLAIMED (returned to its original owner).
    """
    AVAILABLE = u'available'
    RENTED = u'rented'
    RECLAIMED = u'reclaimed'
    STATUS = (
        (AVAILABLE, u'대여 가능'),
        (RENTED, u'대여중'),
        (RECLAIMED, u'반환 완료'),
    )
    # The user who donated this copy.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL)
    # The user currently renting it, if any.
    renter = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name="renting_stocks")
    book = models.ForeignKey(Book)
    added_at = models.DateTimeField(auto_now_add=True)
    changed_at = models.DateTimeField(auto_now=True)
    status = models.CharField(_(u'상태'), max_length=10,
                              choices=STATUS,
                              default=AVAILABLE)
    objects = StockManager()

    def __unicode__(self):
        return u"{} [{}] - {}".format(self.book, self.id, self.owner)

    def ensure_status(self, status):
        # Guard used by the transaction helpers below; fails fast on misuse.
        assert self.status == status, "invalid status"

    class Meta:
        verbose_name = _(u'대여현황')
        verbose_name_plural = _(u'대여현황')
class StockHistory(ConditionMixin):
    """Audit-log entry recording an action performed on a Stock."""
    RENT = u'rent'
    RETURN = u'return'
    DELIVER = u'deliver'
    RECLAIM = u'reclaim'
    ACTION = (
        (RENT, u'대여'),
        (RETURN, u'반납'),
        (DELIVER, u'위탁'),
        (RECLAIM, u'반환'),
    )
    # The user who performed the action.
    actor = models.ForeignKey(settings.AUTH_USER_MODEL)
    stock = models.ForeignKey(Stock)
    added_at = models.DateTimeField(auto_now_add=True)
    action = models.CharField(_(u'행동'), max_length=10, choices=ACTION)

    class Meta:
        verbose_name = _(u'대여이력')
        verbose_name_plural = _(u'대여이력')
class RequestManager(models.Manager):
    """Manager exposing status-filtered querysets for request models."""
    # NOTE(review): uses the pre-Django-1.6 `get_query_set` spelling, matching
    # StockManager above.
    def pending(self, *args, **kwargs):
        qs = self.get_query_set().filter(*args, **kwargs)
        return qs.filter(status=RequestMixin.PENDING)

    def done(self, *args, **kwargs):
        qs = self.get_query_set().filter(*args, **kwargs)
        return qs.filter(status=RequestMixin.DONE)

    def canceled(self, *args, **kwargs):
        qs = self.get_query_set().filter(*args, **kwargs)
        return qs.filter(status=RequestMixin.CANCELED)
class RequestMixin(models.Model):
    """Abstract base for user requests with a PENDING/DONE/CANCELED lifecycle."""
    class Meta:
        abstract = True
        ordering = ['-added_at']

    PENDING = u'pending'
    DONE = u'done'
    CANCELED = u'canceled'
    STATUS = (
        (PENDING, u'대기중'),
        (DONE, u'완료'),
        (CANCELED, u'취소')
    )
    # The user who filed the request.
    actor = models.ForeignKey(settings.AUTH_USER_MODEL)
    added_at = models.DateTimeField(auto_now_add=True)
    changed_at = models.DateTimeField(auto_now=True)
    status = models.CharField(_(u'상태'), max_length=10,
                              choices=STATUS,
                              default=PENDING)
    objects = RequestManager()

    def __unicode__(self):
        # NOTE(review): references `self.book`, which only RentRequest defines;
        # ReturnRequest/ReclaimRequest have `stock` instead, so this would
        # raise AttributeError for them -- confirm.
        return u"{} - {}".format(self.book, self.actor)

    def ensure_status(self, status):
        # Guard used by the request-processing helpers below.
        assert self.status == status, "invalid status"
class RentRequest(RequestMixin):
    """A user's request to rent any available copy of a book."""
    book = models.ForeignKey(Book)
class ReturnRequest(RequestMixin):
    """A renter's request to hand a specific rented copy back."""
    stock = models.ForeignKey(Stock)
class ReclaimRequest(RequestMixin):
    """A donor's request to take back a copy they previously donated."""
    stock = models.ForeignKey(Stock)
@transaction.atomic
def rent_book(actor, book):
    """Rent any available copy of `book` to `actor`, charging points.

    Runs in one transaction: marks a stock RENTED, logs a RENT history
    entry and deducts the book's point cost from the actor.
    """
    ### precondition
    actor.ensure_points(book.point())
    assert book.any_availiable_stock(), "대여 가능한 물품이 존재하지 않습니다"
    stock = book.any_availiable_stock()
    stock.renter = actor
    stock.status = Stock.RENTED
    stock.save()
    # NOTE(review): objects.create() already saves; the chained .save() issues
    # a redundant second write (pattern repeated throughout this module).
    StockHistory.objects.create(actor=actor,
                                stock=stock,
                                action=StockHistory.RENT,
                                condition=stock.condition).save()
    actor.lose_points(book.point())
    actor.save()
@transaction.atomic
def process_rent_request(request):
    """Approve a pending RentRequest: rent a copy to its actor and mark DONE.

    Mirrors `rent_book` but additionally transitions the request status.
    """
    ### precondition
    request.ensure_status(RentRequest.PENDING)
    request.actor.ensure_points(request.book.point())
    assert request.book.any_availiable_stock(), "대여 가능한 물품이 존재하지 않습니다"
    request.status = RentRequest.DONE
    stock = request.book.any_availiable_stock()
    stock.renter = request.actor
    stock.status = Stock.RENTED
    stock.save()
    # NOTE(review): create() already saves; the extra .save() is redundant.
    StockHistory.objects.create(actor=request.actor,
                                stock=stock,
                                action=StockHistory.RENT,
                                condition=stock.condition).save()
    request.actor.lose_points(request.book.point())
    request.actor.save()
    request.save()
@transaction.atomic
def return_stock(actor, stock, condition):
    """Return a rented copy: make it AVAILABLE again and refund points.

    `condition` is the grade observed at return time and is recorded in
    the history entry (the stock's own grade is left unchanged).
    """
    ### precondition
    stock.ensure_status(Stock.RENTED)
    stock.status = Stock.AVAILABLE
    stock.renter = None
    stock.save()
    StockHistory.objects.create(actor=actor,
                                stock=stock,
                                action=StockHistory.RETURN,
                                condition=condition).save()
    # Returning earns the book's point value back.
    actor.get_points(stock.book.point())
    actor.save()
@transaction.atomic
def deliver_stock(actor, book, condition):
    """Register a donated copy of `book` and reward the donor with points."""
    s = Stock.objects.create(owner=actor, book=book, condition=condition, renter=None)
    # NOTE(review): create() already persisted `s`; this save() is redundant.
    s.save()
    StockHistory.objects.create(actor=actor,
                                stock=s,
                                action=StockHistory.DELIVER,
                                condition=condition).save()
    # Donors earn double the book's point value.
    actor.get_points(2 * book.point())
    actor.save()
def request_reclaim(actor, stock):
    """File a request to take back a copy the actor previously donated.

    Only allowed while the copy is AVAILABLE (not rented out).
    """
    ### precondition
    stock.ensure_status(Stock.AVAILABLE)
    assert stock in actor.stock_set.all(), "자신이 기부한 책이 아닙니다"
    ReclaimRequest.objects.create(actor=actor, stock=stock).save()
@transaction.atomic
def process_reclaim_request(request):
    """Approve a pending reclaim request, handing the stock back to its owner.

    Marks the request DONE, flags the stock RECLAIMED and records a RECLAIM
    history entry, all within one transaction.

    Raises:
        AssertionError: if the stock is not owned by the requesting actor,
            or the request is not PENDING.
    """
    ### precondition
    assert request.stock.owner == request.actor, "책의 주인이 일치하지 않습니다"
    # Bug fix: this used to call `request.status.ensure_status(...)` --
    # `status` is a plain string with no such method, so the call always
    # raised AttributeError. `ensure_status` lives on the request itself
    # (cf. process_rent_request above).
    request.ensure_status(ReclaimRequest.PENDING)
    request.status = ReclaimRequest.DONE
    request.save()
    request.stock.status = Stock.RECLAIMED
    request.stock.save()
    StockHistory.objects.create(actor=request.actor,
                                stock=request.stock,
                                action=StockHistory.RECLAIM,
                                condition=request.stock.condition).save()
| SungJinYoo/BookShare | bookshare/apps/core/models.py | Python | gpl-2.0 | 7,322 |
import numpy as np
class CrossValidationResult():
    """
    Container for the results of a cross validation. Works only for binary classification.
    This could be generalized further if need be.
    """
    SEPARATOR = '-' * 80

    def __init__(self, folds, fold_size, feature_labels):
        """
        Creates a new CrossValidationResult instance.

        Args:
          folds (int): Number of folds to expect.
          fold_size (int): Number of examples in a fold.
          feature_labels (list(str)): The ordered list of feature descriptors. Has no significance for
              the calculations but is useful when describing the feature importance.
        """
        self.folds = folds
        self.fold_size = fold_size
        self.feature_labels = feature_labels
        # Per-fold confusion-matrix rates (each entry is a fraction of fold_size).
        self.true_positives = []
        self.false_positives = []
        self.false_negatives = []
        self.true_negatives = []
        # Per-fold aggregate metrics.
        self.accuracies = []
        self.precisions = []
        self.recalls = []
        self.f1_scores = []
        # One importance list per fold, ordered like feature_labels.
        self.feature_importances = []

    @staticmethod
    def mean(data):
        """
        Calculates the mean value of the provided data.

        Args:
          data (sequence): Data over which to calculate the mean. Must be non-empty.

        Returns:
          float: Mean value of the data.
        """
        return sum(data) / len(data)

    def add_fold_predictions(self, predictions, answers, feature_importances):
        """
        Adds the results of a fold prediction to the results.

        Args:
          predictions (list): Predictions (0/1) that were made in this fold.
          answers (list): Correct answers to the predictions.
          feature_importances (list): Importance of the different features in the model.

        Raises:
          ValueError: if the number of predictions doesn't match the expected fold size.
        """
        tp = np.sum([(x == y == 1) for x, y in zip(predictions, answers)])
        fp = np.sum([(x == 1 and y == 0) for x, y in zip(predictions, answers)])
        fn = np.sum([(x == 0 and y == 1) for x, y in zip(predictions, answers)])
        tn = np.sum([(x == y == 0) for x, y in zip(predictions, answers)])
        # The four outcome counts must exactly partition the fold.
        if len(predictions) != self.fold_size or tp + fp + fn + tn != self.fold_size:
            raise ValueError('Unexpected number of prediction results!!')
        accuracy = (tp + tn) / self.fold_size
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1_score = 2 * tp / (2 * tp + fp + fn)
        self.true_positives.append(tp / self.fold_size)
        self.false_positives.append(fp / self.fold_size)
        self.false_negatives.append(fn / self.fold_size)
        self.true_negatives.append(tn / self.fold_size)
        self.accuracies.append(accuracy)
        self.precisions.append(precision)
        self.recalls.append(recall)
        self.f1_scores.append(f1_score)
        self.feature_importances.append(feature_importances)

    def print_short_results(self):
        """
        Prints only few statistics of the cross validation results.
        """
        print('Means in %d-fold CV:\tACC=%f,\tF1=%f' % (
            self.folds,
            CrossValidationResult.mean(self.accuracies),
            CrossValidationResult.mean(self.f1_scores)))

    def print_results(self):
        """
        Prints statistics of the cross validation results.
        """
        print('Cross-validation results')
        print(CrossValidationResult.SEPARATOR)
        # Consistency fix: reuse the `mean` helper instead of re-deriving
        # sum(x)/len(x) inline for every metric; the printed output is
        # identical to the previous hand-rolled version.
        metrics = (
            ('TP', self.true_positives),
            ('FP', self.false_positives),
            ('FN', self.false_negatives),
            ('TN', self.true_negatives),
            ('ACC', self.accuracies),
            ('PREC', self.precisions),
            ('REC', self.recalls),
            ('F1', self.f1_scores),
        )
        for label, data in metrics:
            print('%s:\tmin=%f\tmean=%f\tmax=%f\tstdev=%f' % (
                label, min(data), CrossValidationResult.mean(data), max(data), np.std(data)))
        print(CrossValidationResult.SEPARATOR)
        # Rank features by their mean importance across folds (ascending).
        mean_importance_ranking = []
        max_attribute_name_length = max(len(s) for s in self.feature_labels)
        for i, importances in enumerate(zip(*self.feature_importances)):
            mean_importance_ranking.append((CrossValidationResult.mean(importances), i))
        mean_importance_ranking.sort()
        ranking_lines = ['%s: %f' % (self.feature_labels[index].ljust(max_attribute_name_length + 1), value)
                         for value, index in mean_importance_ranking]
        print('Mean importance ranking: \n%s' % '\n'.join(ranking_lines))
        print(CrossValidationResult.SEPARATOR)
| furgerf/kaggle-projects | common/cross_validation_result.py | Python | apache-2.0 | 5,228 |
from sympy.core import (Basic, Expr, S, C, Symbol, Wild, Add, sympify, diff,
oo, Tuple, Interval)
from sympy.core.symbol import Dummy
from sympy.core.compatibility import is_sequence
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.risch import heurisch
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.utilities import xthreaded, flatten
from sympy.utilities.misc import filldedent
from sympy.polys import Poly, PolynomialError
from sympy.solvers.solvers import solve, posify
from sympy.functions import Piecewise, sqrt, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
def _process_limits(*symbols):
    """Convert the symbols-related limits into proper limits,
    storing them as Tuple(symbol, lower, upper). The sign of
    the function is also returned when the upper limit is missing
    so (x, 1, None) becomes (x, None, 1) and the sign is changed.
    """
    limits = []
    sign = 1
    for V in symbols:
        if isinstance(V, Symbol):
            # Bare symbol: indefinite integration variable.
            limits.append(Tuple(V))
            continue
        elif is_sequence(V, Tuple):
            # flatten() yields a plain list, so item assignment below is valid.
            V = sympify(flatten(V))
            if V[0].is_Symbol:
                newsymbol = V[0]
                # (x, Interval(a, b)) is shorthand for (x, a, b).
                if len(V) == 2 and isinstance(V[1], Interval):
                    V[1:] = [V[1].start, V[1].end]
                if len(V) == 3:
                    # Normalize one-sided limits; a missing upper limit flips
                    # the overall sign (see docstring).
                    if V[1] is None and V[2] is not None:
                        nlim = [V[2]]
                    elif V[1] is not None and V[2] is None:
                        sign *= -1
                        nlim = [V[1]]
                    elif V[1] is None and V[2] is None:
                        nlim = []
                    else:
                        nlim = V[1:]
                    limits.append(Tuple(newsymbol, *nlim ))
                    continue
                elif len(V) == 1 or (len(V) == 2 and V[1] is None):
                    # (x,) or (x, None): indefinite.
                    limits.append(Tuple(newsymbol))
                    continue
                elif len(V) == 2:
                    # (x, a): "evaluate at" form.
                    limits.append(Tuple(newsymbol, V[1]))
                    continue
        # Anything that didn't match one of the recognized shapes above.
        raise ValueError('Invalid limits given: %s' % str(symbols))
    return limits, sign
class Integral(Expr):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
    """Create an unevaluated integral.

    Arguments are an integrand followed by one or more limits.

    If no limits are given and there is only one free symbol in the
    expression, that symbol will be used, otherwise an error will be
    raised.

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> Integral(x)
    Integral(x, x)
    >>> Integral(y)
    Integral(y, y)

    When limits are provided, they are interpreted as follows (using
    ``x`` as though it were the variable of integration):

        (x,) or x - indefinite integral
        (x, a) - "evaluate at" integral
        (x, a, b) - definite integral

    Although the same integral will be obtained from an indefinite
    integral and an "evaluate at" integral when ``a == x``, they
    respond differently to substitution:

    >>> i = Integral(x, x)
    >>> at = Integral(x, (x, x))
    >>> i.doit() == at.doit()
    True
    >>> i.subs(x, 1)
    Integral(1, x)
    >>> at.subs(x, 1)
    Integral(x, (x, 1))

    The ``as_dummy`` method can be used to see which symbols cannot be
    targeted by subs: those with a preppended underscore cannot be
    changed with ``subs``. (Also, the integration variables themselves --
    the first element of a limit -- can never be changed by subs.)

    >>> i.as_dummy()
    Integral(x, x)
    >>> at.as_dummy()
    Integral(_x, (_x, x))
    """
    # Any embedded piecewise functions need to be brought out to the
    # top level so that integration can go into piecewise mode at the
    # earliest possible moment.
    function = piecewise_fold(sympify(function))

    if function is S.NaN:
        return S.NaN

    if symbols:
        # sign compensates for limits given as (x, a, None) -- see
        # _process_limits
        limits, sign = _process_limits(*symbols)
    else:
        # no symbols provided -- let's compute full anti-derivative
        free = function.free_symbols
        if len(free) != 1:
            raise ValueError("specify variables of integration for %s" % function)
        limits, sign = [Tuple(s) for s in free], 1

    while isinstance(function, Integral):
        # denest the integrand: the inner integral's limits become the
        # innermost (leftmost) limits of this one
        limits = list(function.limits) + limits
        function = function.function

    # args are stored directly (not via Expr.__new__(cls, *args)) so the
    # limit Tuples are preserved exactly as built above
    obj = Expr.__new__(cls, **assumptions)
    arglist = [sign*function]
    arglist.extend(limits)
    obj._args = tuple(arglist)
    obj.is_commutative = function.is_commutative  # limits already checked
    return obj
def __getnewargs__(self):
    # Support pickling: rebuild from the integrand plus plain-tuple limits.
    limit_tuples = tuple(tuple(lim) for lim in self.limits)
    return (self.function,) + limit_tuples
@property
def function(self):
    """The integrand of this integral.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x
    >>> Integral(x**2, (x,)).function
    x**2

    See Also
    ========

    limits, variables, free_symbols
    """
    # the integrand is stored as the first element of _args (see __new__)
    return self._args[0]
@property
def limits(self):
    """The limit tuples of this integral.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, i
    >>> Integral(x**i, (i, 1, 3)).limits
    ((i, 1, 3),)

    See Also
    ========

    function, variables, free_symbols
    """
    # everything after the integrand in _args is a limit Tuple
    return self._args[1:]
@property
def variables(self):
    """The integration variables, one per limit, in order.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, i
    >>> Integral(x**i, (i, 1, 3)).variables
    [i]

    See Also
    ========

    function, limits, free_symbols
    as_dummy : Replace integration variables with dummy ones
    transform : Perform mapping on the integration variable
    """
    # each limit tuple starts with its integration variable
    return [lim[0] for lim in self.limits]
@property
def free_symbols(self):
    """Symbols that will remain after the integral is evaluated.

    Useful to determine whether the result depends on a given symbol.
    An integration variable is bound (removed) by any limit of length
    2 or 3; a length-1 limit leaves its variable free.  A definite
    integral with equal bounds is identically zero and has no free
    symbols.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> Integral(x, (x, y, 1)).free_symbols
    set([y])

    See Also
    ========

    function, limits, variables
    """
    if self.function.is_zero:
        return set()
    syms = self.function.free_symbols
    for lim in self.limits:
        if len(lim) == 1:
            # indefinite: the variable survives evaluation
            syms.add(lim[0])
            continue
        # the integration variable is bound by its limits
        syms.discard(lim[0])
        if len(lim) == 3 and lim[1] == lim[2]:
            # equal bounds -> integral is 0, nothing survives
            return set()
        for bound in lim[1:]:
            syms |= bound.free_symbols
    return syms
@property
def is_zero(self):
    """Quick, purely structural test for a trivially-zero integral.

    Since Integral does not autosimplify, this reports True only when
    the integrand is 0 or some definite limit has equal bounds.  It does
    not detect deeper cancellations such as
    Integral(sin(m*x)*cos(n*x), (x, 0, 2*pi)) == 0.

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, y, z
    >>> Integral(1, (x, 1, 1)).is_zero
    True
    >>> Integral(0, (x, y, z)).is_zero
    True
    >>> Integral(1, (x, 1, 2)).is_zero
    False

    See Also
    ========

    is_number
    """
    if self.function.is_zero:
        return True
    for lim in self.limits:
        if len(lim) == 3 and lim[1] == lim[2]:
            # collapsed interval -> zero
            return True
    if not self.free_symbols and self.function.is_number:
        # numeric integrand over numeric limits: definitely not zero here
        return False
    # otherwise unknown (implicit None)
@property
def is_number(self):
    """
    Return True if the Integral will result in a number, else False.

    sympy considers anything that will result in a number to have
    is_number == True.

    >>> from sympy import log
    >>> log(2).is_number
    True

    Integrals are a special case since they contain symbols that can
    be replaced with numbers. Whether the integral can be done or not is
    another issue. But answering whether the final result is a number is
    not difficult.

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> Integral(x).is_number
    False
    >>> Integral(x, y).is_number
    False
    >>> Integral(x, (y, 1, x)).is_number
    False
    >>> Integral(x, (y, 1, 2)).is_number
    False
    >>> Integral(x, (y, 1, 1)).is_number
    True
    >>> Integral(x, (x, 1, 2)).is_number
    True
    >>> Integral(x*y, (x, 1, 2), (y, 1, 3)).is_number
    True
    >>> Integral(1, x, (x, 1, 2)).is_number
    True

    See Also
    ========

    is_zero
    """
    # start from every Symbol in the integrand, then account for how
    # each limit binds or introduces symbols, left to right
    syms = self.function.atoms(Symbol)
    for lim in self.limits:
        if len(lim) == 1:
            # indefinite: variable survives (a later limit may bind it)
            syms.add(lim[0])
            continue  # it may be removed later
        elif len(lim) == 3 and lim[1] == lim[2]:  # XXX naive equality test
            return True  # integral collapsed
        # the integration variable is replaced by its limit values
        syms.discard(lim[0])
        for bound in lim[1:]:
            syms.update(bound.free_symbols)
    # a number results exactly when no symbols survive
    return not syms
def as_dummy(self):
    """
    Replace instances of the integration variables with their dummy
    counterparts to make clear what are dummy variables and what
    are real-world symbols in an Integral.

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
    Integral(_x, (_x, x, _y), (_y, x, y))

    The "integral at" limit that has a length of 1 is not treated as
    though the integration symbol is a dummy, but the explicit form
    of length 2 does treat the integration variable as a dummy.

    >>> Integral(x, x).as_dummy()
    Integral(x, x)
    >>> Integral(x, (x, x)).as_dummy()
    Integral(_x, (_x, x))

    If there were no dummies in the original expression, then the
    output of this function will show which symbols cannot be
    changed by subs(), those with an underscore prefix.

    See Also
    ========

    variables : Lists the integration variables
    transform : Perform mapping on the integration variable
    """
    reps = {}
    f = self.function
    limits = list(self.limits)
    # Walk limits from the innermost (last) outward so that each bound
    # expression sees the dummy replacements of all inner variables.
    for i in reversed(range(len(limits))):
        xab = list(limits[i])
        if len(xab) == 1:
            # "integral at"/indefinite one-element limit: not a dummy
            continue
        x = xab[0]
        xab[0] = x.as_dummy()
        xab[1:] = [bound.subs(reps) for bound in xab[1:]]
        reps[x] = xab[0]
        limits[i] = xab
    f = f.subs(reps)
    return Integral(f, *limits)
def transform(self, x, u, inverse=False):
    r"""
    Performs a change of variables from `x` to `u` using the relationship
    given by `x` and `u` which will define the transformations `f` and `F`
    (which are inverses of each other) as follows:

    1) If `x` is a Symbol (which is a variable of integration) then `u`
       will be interpreted as some function, f(u), with inverse F(u).
       This, in effect, just makes the substitution of x with f(x).

    2) If `u` is a Symbol then `x` will be interpreted as some function,
       F(x), with inverse f(u). This is commonly referred to as
       u-substitution.

    The `inverse` option will reverse `x` and `u`. It is a deprecated option
    since `x` and `u` can just be passed in reverse order.

    Once f and F have been identified, the transformation is made as
    follows:

    .. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
              \frac{\mathrm{d}}{\mathrm{d}x}

    where `F(x)` is the inverse of `f(x)` and the limits and integrand have
    been corrected so as to retain the same value after integration.

    Notes
    =====

    The mappings, F(x) or f(u), must lead to a unique integral. Linear
    or rational linear expression, `2*x`, `1/x` and `sqrt(x)`, will
    always work; quadratic expressions like `x**2 - 1` are acceptable
    as long as the resulting integrand does not depend on the sign of
    the solutions (see examples).

    The integral will be returned unchanged if `x` is not a variable of
    integration.

    `x` must be (or contain) only one of of the integration variables. If
    `u` has more than one free symbol then it should be sent as a tuple
    (`u`, `uvar`) where `uvar` identifies which variable is replacing
    the integration variable.
    XXX can it contain another integration variable?

    Examples
    ========

    >>> from sympy.abc import a, b, c, d, x, u, y
    >>> from sympy import Integral, S, cos, sqrt

    >>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))

    transform can change the variable of integration

    >>> i.transform(x, u)
    Integral(u*cos(u**2 - 1), (u, 0, 1))

    transform can perform u-substitution as long as a unique
    integrand is obtained:

    >>> i.transform(x**2 - 1, u)
    Integral(cos(u)/2, (u, -1, 0))

    This attempt fails because x = +/-sqrt(u + 1) and the
    sign does not cancel out of the integrand:

    >>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
    Traceback (most recent call last):
    ...
    ValueError:
    The mapping between F(x) and f(u) did not give a unique integrand.

    transform can do a substitution. Here, the previous
    result is transformed back into the original expression
    using "u-substitution":

    >>> ui = _
    >>> _.transform(sqrt(u + 1), x) == i
    True

    We can accomplish the same with a regular substitution:

    >>> ui.transform(u, x**2 - 1) == i
    True

    If the `x` does not contain a symbol of integration then
    the integral will be returned unchanged. Integral `i` does
    not have an integration variable `a` so no change is made:

    >>> i.transform(a, x) == i
    True

    When `u` has more than one free symbol the symbol that is
    replacing `x` must be identified by passing `u` as a tuple:

    >>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
    Integral(a + u, (u, -a, -a + 1))
    >>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
    Integral(a + u, (a, -u, -u + 1))

    See Also
    ========

    variables : Lists the integration variables
    as_dummy : Replace integration variables with dummy ones
    """
    if inverse:
        # when this is removed, update the docstring
        from sympy.utilities.exceptions import SymPyDeprecationWarning
        SymPyDeprecationWarning(
            feature="transform(x, f(x), inverse=True)",
            useinstead="transform(f(x), x)",
            issue=3380, deprecated_since_version="0.7.2",
        ).warn()
        # in the old style x and u contained the same variable so
        # don't worry about using the old-style feature with the
        # new style input...but it will still work:
        # i.transform(x, u).transform(x, u, inverse=True) -> i
        x, u = u, x

    # d is a private placeholder used to express f and F in a neutral
    # variable while solving and substituting below
    d = Dummy('d')

    # identify which integration variable x targets (at most one)
    xfree = x.free_symbols.intersection(self.variables)
    if len(xfree) > 1:
        raise ValueError('F(x) can only contain one of: %s' % self.variables)
    xvar = xfree.pop() if xfree else d
    if xvar not in self.variables:
        # x does not involve any integration variable: nothing to do
        return self

    u = sympify(u)
    if isinstance(u, Expr):
        ufree = u.free_symbols
        if len(ufree) != 1:
            raise ValueError(filldedent('''
            When f(u) has more than one free symbol, the one replacing x
            must be identified: pass f(u) as (f(u), u)'''))
        uvar = ufree.pop()
    else:
        # u was passed as a tuple (expr, symbol)
        u, uvar = u
        if uvar not in u.free_symbols:
            raise ValueError(filldedent('''
            Expecting a tuple (expr, symbol) where symbol identified
            a free symbol in expr, but symbol is not in expr's free
            symbols.'''))
        if not isinstance(uvar, Symbol):
            raise ValueError(filldedent('''
            Expecting a tuple (expr, symbol) but didn't get
            a symbol; got %s''' % uvar))

    if x.is_Symbol and u.is_Symbol:
        # pure renaming of the integration variable
        return self.xreplace({x: u})

    if not x.is_Symbol and not u.is_Symbol:
        raise ValueError('either x or u must be a symbol')

    if uvar == xvar:
        # same symbol on both sides: detour through the dummy d
        return self.transform(x, u.subs(uvar, d)).xreplace({d: uvar})

    # NOTE(review): this membership test compares uvar against the limit
    # Tuples themselves, not against self.variables -- presumably intended
    # to reject reuse of an existing integration variable; confirm.
    if uvar in self.limits:
        raise ValueError(filldedent('''
        u must contain the same variable as in x
        or a variable that is not already an integration variable'''))

    # Build f (list of candidate forward maps in d) and F (list of
    # candidate inverse maps in d); multi-valued inverses give several
    # candidates which must later agree.
    if not x.is_Symbol:
        F = [x.subs(xvar, d)]
        soln = solve(u - x, xvar, check=False)
        if not soln:
            raise ValueError('no solution for solve(F(x) - f(u), x)')
        f = [fi.subs(uvar, d) for fi in soln]
    else:
        f = [u.subs(uvar, d)]
        # posify assumes positivity so solve can invert; reps undoes it
        pdiff, reps = posify(u - x)
        puvar = uvar.subs([(v, k) for k, v in reps.iteritems()])
        soln = [s.subs(reps) for s in solve(pdiff, puvar)]
        if not soln:
            raise ValueError('no solution for solve(F(x) - f(u), u)')
        F = [fi.subs(xvar, d) for fi in soln]

    # substitute each candidate map into the integrand (with the
    # Jacobian factor fi.diff(d)); all candidates must agree
    newfuncs = set([(self.function.subs(xvar, fi)*fi.diff(d)
                    ).subs(d, uvar) for fi in f])
    if len(newfuncs) > 1:
        raise ValueError(filldedent('''
        The mapping between F(x) and f(u) did not give
        a unique integrand.'''))
    newfunc = newfuncs.pop()

    def _calc_limit_1(F, a, b):
        """
        replace d with a, using subs if possible, otherwise limit
        where sign of b is considered
        """
        wok = F.subs(d, a)
        if wok is S.NaN or wok.is_bounded is False and a.is_bounded:
            return limit(sign(b)*F, d, a)
        return wok

    def _calc_limit(a, b):
        """
        replace d with a, using subs if possible, otherwise limit
        where sign of b is considered
        """
        avals = list(set([_calc_limit_1(Fi, a, b) for Fi in F]))
        if len(avals) > 1:
            raise ValueError(filldedent('''
            The mapping between F(x) and f(u) did not
            give a unique limit.'''))
        return avals[0]

    # map each limit through F, keeping limits for other variables as-is
    newlimits = []
    for xab in self.limits:
        sym = xab[0]
        if sym == xvar:
            if len(xab) == 3:
                a, b = xab[1:]
                a, b = _calc_limit(a, b), _calc_limit(b, a)
                if a > b:
                    # keep limits ordered; compensate with the sign
                    a, b = b, a
                    newfunc = -newfunc
                newlimits.append((uvar, a, b))
            elif len(xab) == 2:
                a = _calc_limit(xab[1], 1)
                newlimits.append((uvar, a))
            else:
                newlimits.append(uvar)
        else:
            newlimits.append(xab)

    return Integral(newfunc, *newlimits)
def doit(self, **hints):
    """
    Perform the integration using any hints given.

    Hints
    =====

    integrals : bool (default True) -- if False, return self unevaluated
    deep : bool (default True) -- also doit() the integrand and limits
    meijerg : True/False/None -- force/forbid/allow the Meijer G method
    conds : 'piecewise' | 'separate' | 'none' -- how convergence
        conditions from the G-function method are reported

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, i
    >>> Integral(x**i, (i, 1, 3)).doit()
    x**3/log(x) - x/log(x)

    See Also
    ========

    sympy.integrals.trigonometry.trigintegrate
    sympy.integrals.risch.heurisch
    sympy.integrals.rationaltools.ratint
    as_sum : Approximate the integral using a sum
    """
    if not hints.get('integrals', True):
        return self

    deep = hints.get('deep', True)
    meijerg = hints.get('meijerg', None)
    conds = hints.get('conds', 'piecewise')

    if conds not in ['separate', 'piecewise', 'none']:
        raise ValueError('conds must be one of "separate", "piecewise", ' \
                         '"none", got: %s' % conds)

    # check for the trivial case of equal upper and lower limits
    if self.is_zero:
        return S.Zero

    # now compute and check the function
    function = self.function
    if deep:
        function = function.doit(**hints)
    if function.is_zero:
        return S.Zero

    # There is no trivial answer, so continue
    # Limits are processed left to right; a limit is "undone" (kept
    # symbolic) when no antiderivative is found, or when it is blocked
    # by a previously undone limit (see below).
    undone_limits = []
    ulj = set()  # free symbols of any undone limits' upper and lower limits
    for xab in self.limits:
        # compute uli, the free symbols in the
        # Upper and Lower limits of limit I
        if len(xab) == 1:
            uli = set(xab[:1])
        elif len(xab) == 2:
            uli = xab[1].free_symbols
        elif len(xab) == 3:
            uli = xab[1].free_symbols.union(xab[2].free_symbols)
        # this integral can be done as long as there is no blocking
        # limit that has been undone. An undone limit is blocking if
        # it contains an integration variable that is in this limit's
        # upper or lower free symbols or vice versa
        if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
            undone_limits.append(xab)
            ulj.update(uli)
            continue

        # There are a number of tradeoffs in using the meijer g method.
        # It can sometimes be a lot faster than other methods, and
        # sometimes slower. And there are certain types of integrals for
        # which it is more likely to work than others.
        # These heuristics are incorporated in deciding what integration
        # methods to try, in what order.
        # See the integrate() docstring for details.
        def try_meijerg(function, xab):
            # Attempt the definite integral with the Meijer G method,
            # wrapping the (answer, condition) pair per `conds`.
            ret = None
            if len(xab) == 3 and meijerg is not False:
                x, a, b = xab
                try:
                    res = meijerint_definite(function, x, a, b)
                except NotImplementedError:
                    from sympy.integrals.meijerint import _debug
                    _debug('NotImplementedError from meijerint_definite')
                    res = None
                if res is not None:
                    f, cond = res
                    if conds == 'piecewise':
                        ret = Piecewise((f, cond),
                                        (Integral(function, (x, a, b)), True))
                    elif conds == 'separate':
                        if len(self.limits) != 1:
                            raise ValueError('conds=separate not supported in ' \
                                             'multiple integrals')
                        ret = f, cond
                    else:
                        ret = f
            return ret

        meijerg1 = meijerg
        # Prefer the G-function method first for real limits involving
        # infinity (its strong suit); otherwise disable it for this limit.
        if len(xab) == 3 and xab[1].is_real and xab[2].is_real \
           and not function.is_Poly and \
               (xab[1].has(oo, -oo) or xab[2].has(oo, -oo)):
            ret = try_meijerg(function, xab)
            if ret is not None:
                function = ret
                continue
            else:
                meijerg1 = False

        # If the special meijerg code did not succeed finding a definite
        # integral, then the code using meijerint_indefinite will not either
        # (it might find an antiderivative, but the answer is likely to be
        # nonsensical).
        # Thus if we are requested to only use meijer g-function methods,
        # we give up at this stage. Otherwise we just disable g-function
        # methods.
        if meijerg1 is False and meijerg is True:
            antideriv = None
        else:
            antideriv = self._eval_integral(function, xab[0], meijerg1)
            if antideriv is None and meijerg1 is True:
                ret = try_meijerg(function, xab)
                if ret is not None:
                    function = ret
                    continue

        if antideriv is None:
            undone_limits.append(xab)
        else:
            if len(xab) == 1:
                # indefinite: the antiderivative itself is the answer
                function = antideriv
            else:
                if len(xab) == 3:
                    x, a, b = xab
                if len(xab) == 2:
                    x, b = xab
                    a = None

                if deep:
                    if isinstance(a, Basic):
                        a = a.doit(**hints)
                    if isinstance(b, Basic):
                        b = b.doit(**hints)

                if antideriv.is_Poly:
                    # evaluate at the endpoints as an expression, then
                    # repackage as a Poly in the remaining generators
                    gens = list(antideriv.gens)
                    gens.remove(x)

                    antideriv = antideriv.as_expr()

                    function = antideriv._eval_interval(x, a, b)
                    function = Poly(function, *gens)
                else:
                    try:
                        function = antideriv._eval_interval(x, a, b)
                    except NotImplementedError:
                        # This can happen if _eval_interval depends in a
                        # complicated way on limits that cannot be computed
                        undone_limits.append(xab)

    if undone_limits:
        return self.func(*([function] + undone_limits))
    return function
def _eval_derivative(self, sym):
    """Evaluate the derivative of the current Integral object by
    differentiating under the integral sign [1], using the Fundamental
    Theorem of Calculus [2] when possible.

    Whenever an Integral is encountered that is equivalent to zero or
    has an integrand that is independent of the variable of integration
    those integrals are performed. All others are returned as Integral
    instances which can be resolved with doit() (provided they are integrable).

    References:
       [1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
       [2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus

    Examples
    ========

    >>> from sympy import Integral
    >>> from sympy.abc import x, y
    >>> i = Integral(x + y, y, (y, 1, x))
    >>> i.diff(x)
    Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
    >>> i.doit().diff(x) == i.diff(x).doit()
    True
    >>> i.diff(y)
    0

    The previous must be true since there is no y in the evaluated integral:

    >>> i.free_symbols
    set([x])
    >>> i.doit()
    2*x**3/3 - x/2 - 1/6
    """
    # differentiate under the integral sign; we do not
    # check for regularity conditions (TODO), see issue 1116

    # get limits and the function
    f, limits = self.function, list(self.limits)

    # the order matters if variables of integration appear in the limits
    # so work our way in from the outside to the inside.
    limit = limits.pop(-1)
    if len(limit) == 3:
        x, a, b = limit
    elif len(limit) == 2:
        x, b = limit
        a = None
    else:
        a = b = None
        x = limit[0]

    if limits:  # f is the argument to an integral
        f = Integral(f, *tuple(limits))

    # assemble the pieces
    def _do(f, ab):
        # one boundary term of the Leibniz rule: f evaluated at the
        # bound `ab`, times d(ab)/d(sym); zero when the bound does not
        # depend on sym
        dab_dsym = diff(ab, sym)
        if not dab_dsym:
            return S.Zero
        if isinstance(f, Integral):
            # make any 1-element limit in x explicit as (x, x) so the
            # substitution below targets the limits, not the dummy
            limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
                      for l in f.limits]
            f = Integral(f.function, *limits)
        return f.subs(x, ab)*dab_dsym

    rv = 0
    if b is not None:
        rv += _do(f, b)
    if a is not None:
        rv -= _do(f, a)
    if len(limit) == 1 and sym == x:
        # the dummy variable *is* also the real-world variable
        arg = f
        rv += arg
    else:
        # the dummy variable might match sym but it's
        # only a dummy and the actual variable is determined
        # by the limits, so mask off the variable of integration
        # while differentiating
        u = Dummy('u')
        arg = f.subs(x, u).diff(sym).subs(u, x)
        rv += Integral(arg, Tuple(x, a, b))
    return rv
def _eval_integral(self, f, x, meijerg=None):
    """Calculate the anti-derivative to the function f(x).

    Returns the antiderivative as an expression (or Poly), or None if
    no method succeeds.  ``meijerg`` may be True (only the Meijer
    G-function method), False (never that method), or None (try
    everything).

    This is a powerful function that should in theory be able to integrate
    everything that can be integrated. If you find something, that it
    doesn't, it is easy to implement it.

    (1) Simple heuristics (based on pattern matching and integral table):

       - most frequently used functions (e.g. polynomials)
       - functions non-integrable by any of the following algorithms (e.g.
         exp(-x**2))

    (2) Integration of rational functions:

       (a) using apart() - apart() is full partial fraction decomposition
           procedure based on Bronstein-Salvy algorithm. It gives formal
           decomposition with no polynomial factorization at all (so it's
           fast and gives the most general results). However it needs an
           implementation of the RootsOf class.
       (b) using Trager's algorithm - possibly faster than (a) but needs
           implementation :)

    (3) Whichever implementation of pmInt (Mateusz, Kirill's or a
        combination of both).

          - this way we can handle efficiently huge class of elementary and
            special functions

    (4) Recursive Risch algorithm as described in Bronstein's integration
        tutorial.

          - this way we can handle those integrable functions for which (3)
            fails

    (5) Powerful heuristics based mostly on user defined rules.

         - handle complicated, rarely used cases
    """
    # if it is a poly(x) then let the polynomial integrate itself (fast)
    #
    # It is important to make this check first, otherwise the other code
    # will return a sympy expression instead of a Polynomial.
    #
    # see Polynomial for details.
    if isinstance(f, Poly) and not meijerg:
        return f.integrate(x)

    # Piecewise antiderivatives need to call special integrate.
    if f.func is Piecewise:
        return f._eval_integral(x)

    # let's cut it short if `f` does not depend on `x`
    if not f.has(x):
        return f*x

    # try to convert to poly(x) and then integrate if successful (fast)
    poly = f.as_poly(x)
    if poly is not None and not meijerg:
        return poly.integrate().as_expr()

    # since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
    # we are going to handle Add terms separately,
    # if `f` is not Add -- we only have one term
    parts = []
    args = Add.make_args(f)
    for g in args:
        coeff, g = g.as_independent(x)

        # g(x) = const
        if g is S.One and not meijerg:
            parts.append(coeff*x)
            continue

        # g(x) = expr + O(x**n)
        order_term = g.getO()

        if order_term is not None:
            h = self._eval_integral(g.removeO(), x)

            if h is not None:
                h_order_expr = self._eval_integral(order_term.expr, x)

                if h_order_expr is not None:
                    h_order_term = order_term.func(h_order_expr, *order_term.variables)
                    parts.append(coeff*(h + h_order_term))
                    continue

            # NOTE: if there is O(x**n) and we fail to integrate then there is
            # no point in trying other methods because they will fail anyway.
            return None

        #               c
        # g(x) = (a*x+b)
        if g.is_Pow and not g.exp.has(x) and not meijerg:
            a = Wild('a', exclude=[x])
            b = Wild('b', exclude=[x])

            M = g.base.match(a*x + b)

            if M is not None:
                if g.exp == -1:
                    h = C.log(g.base)
                else:
                    h = g.base**(g.exp + 1) / (g.exp + 1)

                parts.append(coeff * h / M[a])
                continue

        #        poly(x)
        # g(x) = -------
        #        poly(x)
        if g.is_rational_function(x) and not meijerg:
            parts.append(coeff * ratint(g, x))
            continue

        if not meijerg:
            # g(x) = Mul(trig)
            h = trigintegrate(g, x)
            if h is not None:
                parts.append(coeff * h)
                continue

            # g(x) has at least a DiracDelta term
            h = deltaintegrate(g, x)
            if h is not None:
                parts.append(coeff * h)
                continue

        if not meijerg:
            # fall back to the more general algorithm
            try:
                h = heurisch(g, x, hints=[])
            except PolynomialError:
                # XXX: this exception means there is a bug in the
                # implementation of heuristic Risch integration
                # algorithm.
                h = None
        else:
            h = None

        if meijerg is not False and h is None:
            # rewrite using G functions
            try:
                h = meijerint_indefinite(g, x)
            except NotImplementedError:
                from sympy.integrals.meijerint import _debug
                # FIX: this branch previously logged 'meijerint_definite'
                # and assigned a dead `res = None`; the call made here is
                # meijerint_indefinite and the variable consulted below
                # is `h`.
                _debug('NotImplementedError from meijerint_indefinite')
                h = None
            if h is not None:
                parts.append(coeff * h)
                continue

        # if we failed maybe it was because we had
        # a product that could have been expanded,
        # so let's try an expansion of the whole
        # thing before giving up; we don't try this
        # out the outset because there are things
        # that cannot be solved unless they are
        # NOT expanded e.g., x**x*(1+log(x)). There
        # should probably be a checker somewhere in this
        # routine to look for such cases and try to do
        # collection on the expressions if they are already
        # in an expanded form
        if not h and len(args) == 1:
            f = f.expand(mul=True, deep=False)
            if f.is_Add:
                return self._eval_integral(f, x, meijerg)

        if h is not None:
            parts.append(coeff * h)
        else:
            return None

    return Add(*parts)
def _eval_lseries(self, x):
    # Lazy series: integrate the integrand's series term by term,
    # reusing this integral's limits for every term.
    limits = self.limits
    for series_term in self.function.lseries(x):
        yield integrate(series_term, *limits)
def _eval_nseries(self, x, n, logx):
    # Expand the integrand to order n, integrate the finite part, and
    # carry the O(...) terms through multiplied by x (one integration).
    expansion = self.function.nseries(x, n=n, logx=logx)
    terms, order = expansion.as_coeff_add(C.Order)
    return integrate(terms, *self.limits) + Add(*order)*x
def _eval_subs(self, old, new):
    """
    Substitute old with new in the integrand and the limits, but don't
    change anything that is (or corresponds to) a dummy variable of
    integration.

    The normal substitution semantics -- traversing all arguments looking
    for matching patterns -- should not be applied to the Integrals since
    changing the integration variables should also entail a change in the
    integration limits (which should be done with the transform method). So
    this method just makes changes in the integrand and the limits.

    Not all instances of a given variable are conceptually the same: the
    first argument of the limit tuple with length greater than 1 and any
    corresponding variable in the integrand are dummy variables while
    every other symbol is a symbol that will be unchanged when the integral
    is evaluated. For example, the dummy variables for ``i`` can be seen
    as symbols with a preppended underscore:

    >>> from sympy import Integral
    >>> from sympy.abc import a, b, c, x, y
    >>> i = Integral(a + x, (a, a, b))
    >>> i.as_dummy()
    Integral(_a + x, (_a, a, b))

    The "evaluate at" form of an integral allows some flexibility in how
    the integral will be treated by subs: if there is no second argument,
    none of the symbols matching the integration symbol are considered to
    be dummy variables, but if an explicit expression is given for a limit
    then the usual interpretation of the integration symbol as a dummy
    symbol applies:

    >>> Integral(x).as_dummy()
    Integral(x, x)
    >>> Integral(x, x).as_dummy()
    Integral(x, x)
    >>> _.subs(x, 1)
    Integral(1, x)
    >>> i = Integral(x, (x, x))
    >>> i.as_dummy()
    Integral(_x, (_x, x))
    >>> i.subs(x, 1)
    Integral(x, (x, 1))

    Summary: no variable of the integrand or limit can be the target of
    substitution if it appears as a variable of integration in a limit
    positioned to the right of it. The only exception is for a variable
    that defines an indefinite integral limit (a single symbol): that
    symbol *can* be replaced in the integrand.

    >>> i = Integral(a + x, (a, a, 3), (b, x, c))
    >>> i.free_symbols # only these can be changed
    set([a, c, x])
    >>> i.subs(a, c) # note that the variable of integration is unchanged
    Integral(a + x, (a, c, 3), (b, x, c))
    >>> i.subs(a + x, b) == i # there is no x + a, only x + <a>
    True
    >>> i.subs(x, y - c)
    Integral(a - c + y, (a, a, 3), (b, -c + y, c))
    """
    integrand = self.function
    limits = list(self.limits)
    targets = old.free_symbols
    # dummies accumulated so far while scanning limits right-to-left;
    # once `old` overlaps a dummy, substitution stops propagating inward
    seen_dummies = set()
    for i in reversed(range(len(limits))):
        xab = limits[i]
        if len(xab) == 1:
            # single-symbol limit: its variable is NOT a dummy
            continue
        if not (seen_dummies & targets):
            # substitute only in the bounds, never the variable itself
            new_bounds = [bound._subs(old, new) for bound in xab[1:]]
            limits[i] = Tuple(xab[0], *new_bounds)
        seen_dummies.add(xab[0])
    if not (seen_dummies & targets):
        integrand = integrand.subs(old, new)
    return Integral(integrand, *limits)
def as_sum(self, n, method="midpoint"):
    """
    Approximates the integral by a sum of n rectangles.

    method ... one of: left, right, midpoint

    This is basically just the rectangle method [1], the only difference is
    where the function value is taken in each interval.

    [1] http://en.wikipedia.org/wiki/Rectangle_method

    **method = midpoint**:

    Uses the n-order midpoint rule to evaluate the integral.

    Midpoint rule uses rectangles approximation for the given area (e.g.
    definite integral) of the function with heights equal to the point on
    the curve exactly in the middle of each interval (thus midpoint
    method). See [1] for more information.

    Examples
    ========

    >>> from sympy import sqrt
    >>> from sympy.abc import x
    >>> from sympy.integrals import Integral
    >>> e = Integral(sqrt(x**3+1), (x, 2, 10))
    >>> e
    Integral(sqrt(x**3 + 1), (x, 2, 10))
    >>> e.as_sum(4, method="midpoint")
    4*sqrt(7) + 6*sqrt(14) + 4*sqrt(86) + 2*sqrt(730)
    >>> e.as_sum(4, method="midpoint").n()
    124.164447891310
    >>> e.n()
    124.616199194723

    **method=left**:

    Uses the n-order rectangle rule to evaluate the integral, at each
    interval the function value is taken at the left hand side of the
    interval.

    >>> e.as_sum(4, method="left")
    6 + 2*sqrt(65) + 2*sqrt(217) + 6*sqrt(57)
    >>> e.as_sum(4, method="left").n()
    96.8853618335341

    See Also
    ========

    Integral.doit : Perform the integration using any hints
    """
    limits = self.limits
    if len(limits) > 1:
        raise NotImplementedError("Multidimensional midpoint rule not implemented yet")
    limit = limits[0]
    if n <= 0:
        raise ValueError("n must be > 0")
    if n == oo:
        raise NotImplementedError("Infinite summation not yet implemented")
    sym, lower_limit, upper_limit = limit
    dx = (upper_limit - lower_limit)/n
    # sample-point offset within each subinterval, per method
    sample_offsets = {
        "midpoint": dx/2,
        "left": 0,
        "right": dx,
    }
    if method not in sample_offsets:
        raise NotImplementedError("Unknown method %s" % method)
    offset = sample_offsets[method]
    total = 0.
    for i in range(n):
        total += self.function.subs(sym, lower_limit + i*dx + offset)
    return total*dx
@xthreaded
def integrate(*args, **kwargs):
    """integrate(f, var, ...)
    Compute definite or indefinite integral of one or more variables
    using Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.
    var can be:
    - a symbol                   -- indefinite integration
    - a tuple (symbol, a)        -- indefinite integration with result
                                    given with `a` replacing `symbol`
    - a tuple (symbol, a, b)     -- definite integration
    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)
    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)
    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').
    **Strategy**
    SymPy uses various approaches to integration. One method is to find
    an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms. There is also a (very successful,
    albeit somewhat slow) general implementation of the heuristic risch
    algorithm. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.
    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).
    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.
    The strategy employed by the integration code is as follows:
    - If computing a definite integral, and both limits are real,
      and at least one limit is +- oo, try the G-function method of
      definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
      by performance (that is try fastest method first, slowest last; in
      particular polynomial integration is tried first, meijer
      g-functions second to last, and heuristic risch last).
    - If still not successful, try G-functions irrespective of the
      limits.
    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.
    Examples
    ========
    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y
    >>> integrate(x*y, x)
    x**2*y/2
    >>> integrate(log(x), x)
    x*log(x) - x
    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1
    >>> integrate(x)
    x**2/2
    Terms that are independent of x are dropped by indefinite integration:
    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3
    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y
    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), -re(a) < 1), (Integral(x**a*exp(-x), (x, 0, oo)), True))
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), -re(a) < 1)
    See Also
    ========
    Integral, Integral.doit
    """
    # Options consumed by doit(); everything remaining in kwargs is
    # forwarded to the Integral constructor.
    meijerg = kwargs.pop('meijerg', None)
    conds = kwargs.pop('conds', 'piecewise')
    integral = Integral(*args, **kwargs)

    if isinstance(integral, Integral):
        # deep=False: the limits/integrand were just built, no need to
        # re-evaluate nested expressions.  (PEP 8: no spaces around '='
        # in keyword arguments.)
        return integral.doit(deep=False, meijerg=meijerg, conds=conds)
    else:
        # Integral(...) may auto-evaluate (e.g. for a constant integrand);
        # in that case return the already-evaluated result directly.
        return integral
@xthreaded
def line_integrate(field, curve, vars):
    """line_integrate(field, Curve, variables)
    Compute the line integral of a scalar field along a parametrized curve.
    Examples
    ========
    >>> from sympy import Curve, line_integrate, E, ln
    >>> from sympy.abc import x, y, t
    >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
    >>> line_integrate(x + y, C, [x, y])
    3*sqrt(2)
    See Also
    ========
    integrate, Integral
    """
    scalar_field = sympify(field)

    # Validate the three arguments before doing any symbolic work.
    if not scalar_field:
        raise ValueError("Expecting function specifying field as first argument.")
    if not isinstance(curve, Curve):
        raise ValueError("Expecting Curve entity as second argument.")
    if not is_sequence(vars):
        raise ValueError("Expecting ordered iterable for variables.")
    if len(curve.functions) != len(vars):
        raise ValueError("Field variable size does not match curve dimension.")
    if curve.parameter in vars:
        raise ValueError("Curve parameter clashes with field parameters.")

    # Substitute the parametrization into the field, F(r) -> F(r(t)),
    # while accumulating |r'(t)|**2 for the arc-length element.
    integrand = scalar_field
    speed_squared = 0
    for variable, component in zip(vars, curve.functions):
        derivative = diff(component, curve.parameter)
        speed_squared = speed_squared + derivative * derivative
        integrand = integrand.subs(variable, component)
    integrand = integrand * sqrt(speed_squared)

    return Integral(integrand, curve.limits).doit(deep=False)
| srjoglekar246/sympy | sympy/integrals/integrals.py | Python | bsd-3-clause | 50,685 |
from sympy.polys.rings import ring
from sympy.polys.domains import ZZ, QQ, AlgebraicField
from sympy.polys.modulargcd import (
modgcd_univariate,
modgcd_bivariate,
_chinese_remainder_reconstruction_multivariate,
modgcd_multivariate,
_to_ZZ_poly,
_to_ANP_poly,
func_field_modgcd,
_func_field_modgcd_m)
from sympy import sqrt
def test_modgcd_univariate_integers():
    # Each call returns (h, cff, cfg): the gcd h and the cofactors
    # cff = f/h, cfg = g/h.
    R, x = ring("x", ZZ)
    # gcd(0, 0) is 0 with zero cofactors.
    f, g = R.zero, R.zero
    assert modgcd_univariate(f, g) == (0, 0, 0)
    # gcd with 0: the gcd is the other argument, normalized to a
    # positive leading coefficient (see the -x cases below).
    f, g = R.zero, x
    assert modgcd_univariate(f, g) == (x, 0, 1)
    assert modgcd_univariate(g, f) == (x, 1, 0)
    f, g = R.zero, -x
    assert modgcd_univariate(f, g) == (x, 0, -1)
    assert modgcd_univariate(g, f) == (x, -1, 0)
    # Only the integer content is shared.
    f, g = 2*x, R(2)
    assert modgcd_univariate(f, g) == (2, x, 1)
    # The gcd keeps the common content 2 as well as the factor x + 1.
    f, g = 2*x + 2, 6*x**2 - 6
    assert modgcd_univariate(f, g) == (2*x + 2, 1, 3*x - 3)
    # f = (x+1)**2*(x+2)*(x+4), g = (x+1)*(x+2)*(x+3):
    # gcd = (x+1)*(x+2).
    f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
    g = x**3 + 6*x**2 + 11*x + 6
    h = x**2 + 3*x + 2
    cff = x**2 + 5*x + 4
    cfg = x + 3
    assert modgcd_univariate(f, g) == (h, cff, cfg)
    # f = (x**2 - 2)*(x**2 + 2), g = (x**2 + 2)**2: gcd = x**2 + 2.
    f = x**4 - 4
    g = x**4 + 4*x**2 + 4
    h = x**2 + 2
    cff = x**2 - 2
    cfg = x**2 + 2
    assert modgcd_univariate(f, g) == (h, cff, cfg)
    # Coprime pair: gcd is 1 and the cofactors are the inputs themselves.
    f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
    g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
    h = 1
    cff = f
    cfg = g
    assert modgcd_univariate(f, g) == (h, cff, cfg)
    # Huge-coefficient stress test: gcd(f, f') of a sparse degree-49
    # polynomial; only the gcd (index 0) is checked.
    f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \
        + 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \
        + 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \
        + 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \
        - 12278371209708240950316872681744825481125965781519138077173235712*x**21 \
        + 289127344604779611146960547954288113529690984687482920704*x**14 \
        + 19007977035740498977629742919480623972236450681*x**7 \
        + 311973482284542371301330321821976049
    g = 365431878023781158602430064717380211405897160759702125019136*x**21 \
        + 197599133478719444145775798221171663643171734081650688*x**14 \
        - 9504116979659010018253915765478924103928886144*x**7 \
        - 311973482284542371301330321821976049
    assert modgcd_univariate(f, f.diff(x))[0] == g
    # Large integer contents: f is 10946 * g, so gcd = g.
    f = 1317378933230047068160*x + 2945748836994210856960
    g = 120352542776360960*x + 269116466014453760
    h = 120352542776360960*x + 269116466014453760
    cff = 10946
    cfg = 1
    assert modgcd_univariate(f, g) == (h, cff, cfg)
def test_modgcd_bivariate_integers():
    """Tests for ``modgcd_bivariate`` over ZZ[x,y].

    Every call returns ``(h, cff, cfg)``: the gcd ``h`` and the
    cofactors of the two inputs.
    """
    R, x, y = ring("x,y", ZZ)

    # Degenerate and constant cases.
    assert modgcd_bivariate(R.zero, R.zero) == (0, 0, 0)
    assert modgcd_bivariate(2*x, R(2)) == (2, x, 1)

    # Coprime inputs: gcd is 1, cofactors are the inputs themselves.
    p, q = x + 2*y, x + y
    assert modgcd_bivariate(p, q) == (1, p, q)
    p, q = x**2*y**2 + x**2*y + 1, x*y**2 + x*y + 1
    assert modgcd_bivariate(p, q) == (1, p, q)

    # Non-trivial common factors.
    assert modgcd_bivariate(x**2 + 2*x*y + y**2, x**3 + y**3) == \
        (x + y, x + y, x**2 - x*y + y**2)
    assert modgcd_bivariate(x*y**2 + 2*x*y + x, x*y**3 + x) == \
        (x*y + x, y + 1, y**2 - y + 1)

    p = 2*x*y**2 + 4*x*y + 2*x + y**2 + 2*y + 1
    q = 2*x*y**3 + 2*x + y**3 + 1
    assert modgcd_bivariate(p, q) == (2*x*y + 2*x + y + 1, y + 1, y**2 - y + 1)

    # Swapping the arguments only swaps the cofactors.
    assert modgcd_bivariate(2*x**2 + 4*x + 2, x + 1) == (x + 1, 2*x + 2, 1)
    assert modgcd_bivariate(x + 1, 2*x**2 + 4*x + 2) == (x + 1, 1, 2*x + 2)

    p = 2*x**2 + 4*x*y - 2*x - 4*y
    q = x**2 + x - 2
    assert modgcd_bivariate(p, q) == (x - 1, 2*x + 4*y, x + 2)

    p = 2*x**2 + 2*x*y - 3*x - 3*y
    q = 4*x*y - 2*x + 4*y**2 - 2*y
    assert modgcd_bivariate(p, q) == (x + y, 2*x - 3, 4*y - 2)
def test_chinese_remainder():
    """CRT reconstruction of a polynomial from its images mod p and mod q."""
    R, x, y = ring("x, y", ZZ)

    # Plain multivariate ring over ZZ.
    modulus_p, modulus_q = 3, 5
    image_p = x**3*y - x**2 - 1
    image_q = -x**3*y - 2*x*y**2 + 2
    reconstructed = _chinese_remainder_reconstruction_multivariate(
        image_p, image_q, modulus_p, modulus_q)
    # Reducing the reconstruction modulo each prime recovers the images.
    assert reconstructed.trunc_ground(modulus_p) == image_p
    assert reconstructed.trunc_ground(modulus_q) == image_q

    # Ring whose ground domain is itself a polynomial ring.
    T, z = ring("z", R)

    modulus_p, modulus_q = 3, 7
    image_p = (x*y + 1)*z**2 + x
    image_q = (x**2 - 3*y)*z + 2
    reconstructed = _chinese_remainder_reconstruction_multivariate(
        image_p, image_q, modulus_p, modulus_q)
    assert reconstructed.trunc_ground(modulus_p) == image_p
    assert reconstructed.trunc_ground(modulus_q) == image_q
def test_modgcd_multivariate_integers():
    # Each call returns (h, cff, cfg): the gcd h and the cofactors
    # cff = f/h, cfg = g/h.
    R, x, y = ring("x,y", ZZ)
    f, g = R.zero, R.zero
    assert modgcd_multivariate(f, g) == (0, 0, 0)
    f, g = 2*x**2 + 4*x + 2, x + 1
    assert modgcd_multivariate(f, g) == (x + 1, 2*x + 2, 1)
    # Swapped arguments: same gcd, swapped cofactors.
    f, g = x + 1, 2*x**2 + 4*x + 2
    assert modgcd_multivariate(f, g) == (x + 1, 1, 2*x + 2)
    f = 2*x**2 + 2*x*y - 3*x - 3*y
    g = 4*x*y - 2*x + 4*y**2 - 2*y
    assert modgcd_multivariate(f, g) == (x + y, 2*x - 3, 4*y - 2)
    f, g = x*y**2 + 2*x*y + x, x*y**3 + x
    assert modgcd_multivariate(f, g) == (x*y + x, y + 1, y**2 - y + 1)
    # Coprime pair: gcd is 1 and the cofactors are the inputs themselves.
    f, g = x**2*y**2 + x**2*y + 1, x*y**2 + x*y + 1
    assert modgcd_multivariate(f, g) == (1, f, g)
    # Univariate input handled by the multivariate routine.
    f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
    g = x**3 + 6*x**2 + 11*x + 6
    h = x**2 + 3*x + 2
    cff = x**2 + 5*x + 4
    cfg = x + 3
    assert modgcd_multivariate(f, g) == (h, cff, cfg)
    # Four variables.
    R, x, y, z, u = ring("x,y,z,u", ZZ)
    f, g = x + y + z, -x - y - z - u
    assert modgcd_multivariate(f, g) == (1, f, g)
    f, g = u**2 + 2*u + 1, 2*u + 2
    assert modgcd_multivariate(f, g) == (u + 1, u + 1, 2)
    f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1
    h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1
    assert modgcd_multivariate(f, g) == (h, cff, cfg)
    assert modgcd_multivariate(g, f) == (h, cfg, cff)
    R, x, y, z = ring("x,y,z", ZZ)
    # Equal inputs: the gcd is the input with unit cofactors.
    f, g = x - y*z, x - y*z
    assert modgcd_multivariate(f, g) == (x - y*z, 1, 1)
    # Fateman benchmark F_1 in an increasing number of variables;
    # fateman_poly_F_1() returns (f, g, gcd(f, g)).
    f, g, h = R.fateman_poly_F_1()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
    f, g, h = R.fateman_poly_F_1()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ)
    f, g, h = R.fateman_poly_F_1()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ)
    f, g, h = R.fateman_poly_F_1()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    # Fateman benchmarks F_2 and F_3.
    R, x, y, z = ring("x,y,z", ZZ)
    f, g, h = R.fateman_poly_F_2()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    f, g, h = R.fateman_poly_F_3()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
    R, x, y, z, t = ring("x,y,z,t", ZZ)
    f, g, h = R.fateman_poly_F_3()
    H, cff, cfg = modgcd_multivariate(f, g)
    assert H == h and H*cff == f and H*cfg == g
def test_to_ZZ_ANP_poly():
    """Round-trip conversion between polynomials over an algebraic field
    and their integer (ZZ) representations."""
    A = AlgebraicField(QQ, sqrt(2))

    # Univariate: the extra generator z_ stands for sqrt(2).
    R, x = ring("x", A)
    anp_poly = x*(sqrt(2) + 1)
    T, x_, z_ = ring("x_, z_", ZZ)
    zz_poly = x_*z_ + x_
    assert _to_ZZ_poly(anp_poly, T) == zz_poly
    assert _to_ANP_poly(zz_poly, R) == anp_poly

    # Multivariate: the symbols t, s move into the ground domain D.
    R, x, t, s = ring("x, t, s", A)
    anp_poly = x*t**2 + x*s + sqrt(2)
    D, t_, s_ = ring("t_, s_", ZZ)
    T, x_, z_ = ring("x_, z_", D)
    zz_poly = (t_**2 + s_)*x_ + z_
    assert _to_ZZ_poly(anp_poly, T) == zz_poly
    assert _to_ANP_poly(zz_poly, R) == anp_poly
def test_modgcd_algebraic_field():
    # func_field_modgcd returns (h, cff, cfg); the examples below show the
    # gcd h returned monic over the field, with contents left in the
    # cofactors (e.g. gcd(2*x + 2, 6*x**2 - 6) == x + 1).
    A = AlgebraicField(QQ, sqrt(2))
    R, x = ring("x", A)
    one = A.one
    f, g = 2*x, R(2)
    assert func_field_modgcd(f, g) == (one, f, g)
    f, g = 2*x, R(sqrt(2))
    assert func_field_modgcd(f, g) == (one, f, g)
    f, g = 2*x + 2, 6*x**2 - 6
    assert func_field_modgcd(f, g) == (x + 1, R(2), 6*x - 6)
    R, x, y = ring("x, y", A)
    f, g = x + sqrt(2)*y, x + y
    assert func_field_modgcd(f, g) == (one, f, g)
    f, g = x*y + sqrt(2)*y**2, R(sqrt(2))*y
    assert func_field_modgcd(f, g) == (y, x + sqrt(2)*y, R(sqrt(2)))
    # f = (x + sqrt(2)*y)**2, g = x + sqrt(2)*y: gcd is g itself.
    f, g = x**2 + 2*sqrt(2)*x*y + 2*y**2, x + sqrt(2)*y
    assert func_field_modgcd(f, g) == (g, g, one)
    # Field with two algebraic generators.
    A = AlgebraicField(QQ, sqrt(2), sqrt(3))
    R, x, y, z = ring("x, y, z", A)
    h = x**2*y**7 + sqrt(6)/21*z
    f, g = h*(27*y**3 + 1), h*(y + x)
    assert func_field_modgcd(f, g) == (h, 27*y**3+1, y+x)
    h = x**13*y**3 + 1/2*x**10 + 1/sqrt(2)
    f, g = h*(x + 1), h*sqrt(2)/sqrt(3)
    assert func_field_modgcd(f, g) == (h, x + 1, R(sqrt(2)/sqrt(3)))
    # Generator given as a compound expression sqrt(3)/sqrt(2).
    A = AlgebraicField(QQ, sqrt(2)**(-1)*sqrt(3))
    R, x = ring("x", A)
    f, g = x + 1, x - 1
    assert func_field_modgcd(f, g) == (A.one, f, g)
# When func_field_modgcd supports function fields, this test can be changed.
def test_modgcd_func_field():
    """_func_field_modgcd_m on coprime inputs returns the unit polynomial."""
    D, t = ring("t", ZZ)
    R, x, z = ring("x, z", D)
    # Minimal polynomial of the generator z, with the variable x dropped.
    minpoly = (z**2*t**2 + z**2*t - 1).drop(0)
    assert _func_field_modgcd_m(x + 1, x - 1, minpoly) == R.one
| wxgeo/geophar | wxgeometrie/sympy/polys/tests/test_modulargcd.py | Python | gpl-2.0 | 9,007 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworkGatewayConnectionsOperations(object):
"""VirtualNetworkGatewayConnectionsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2015-06-15".
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send HTTP requests.
        self._client = client
        # msrest serializer/deserializer for request bodies and responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Sent as the 'api-version' query parameter on every request.
        self.api_version = "2015-06-15"
        self.config = config
    def create_or_update(
            self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a virtual network gateway connection in the
        specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The name of the
         virtual network gateway connection.
        :type virtual_network_gateway_connection_name: str
        :param parameters: Parameters supplied to the create or update virtual
         network gateway connection operation.
        :type parameters: :class:`VirtualNetworkGatewayConnection
         <azure.mgmt.network.v2015_06_15.models.VirtualNetworkGatewayConnection>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`VirtualNetworkGatewayConnection
         <azure.mgmt.network.v2015_06_15.models.VirtualNetworkGatewayConnection>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers (caller-supplied custom_headers may override
        # the defaults; 'accept-language' is applied last)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualNetworkGatewayConnection')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 = updated, 201 = created; anything else is an error.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
            if response.status_code == 201:
                deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True skips polling: send once and return the direct result.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway connection by resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection.
:type virtual_network_gateway_connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGatewayConnection
<azure.mgmt.network.v2015_06_15.models.VirtualNetworkGatewayConnection>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def delete(
            self, resource_group_name, virtual_network_gateway_connection_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified virtual network Gateway connection.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The name of the
         virtual network gateway connection.
        :type virtual_network_gateway_connection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers (custom_headers may override the defaults)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Initial DELETE that starts the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Delete returns no body; only the status code is validated.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        # raw=True skips polling: send once and return the direct result.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get_shared_key(
            self, resource_group_name, connection_shared_key_name, custom_headers=None, raw=False, **operation_config):
        """The Get VirtualNetworkGatewayConnectionSharedKey operation retrieves
        information about the specified virtual network gateway connection
        shared key through Network resource provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param connection_shared_key_name: The virtual network gateway
         connection shared key name.
        :type connection_shared_key_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`ConnectionSharedKeyResult
         <azure.mgmt.network.v2015_06_15.models.ConnectionSharedKeyResult>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{connectionSharedKeyName}/sharedkey'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'connectionSharedKeyName': self._serialize.url("connection_shared_key_name", connection_shared_key_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers (custom_headers may override the defaults)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request; this is a direct (non-LRO) GET.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('ConnectionSharedKeyResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """The List VirtualNetworkGatewayConnections operation retrieves all the
        virtual network gateways connections created.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualNetworkGatewayConnectionPaged
         <azure.mgmt.network.v2015_06_15.models.VirtualNetworkGatewayConnectionPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetches a single page; invoked lazily by the Paged collection.
            if not next_link:
                # First page: build the URL from the resource template.
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # Later pages: the service returns a complete nextLink URL.
                url = next_link
                query_parameters = {}

            # Construct headers (custom_headers may override the defaults)
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response into a lazily-iterated paged collection.
        deserialized = models.VirtualNetworkGatewayConnectionPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkGatewayConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    def reset_shared_key(
            self, resource_group_name, virtual_network_gateway_connection_name, key_length=None, custom_headers=None, raw=False, **operation_config):
        """The VirtualNetworkGatewayConnectionResetSharedKey operation resets the
        virtual network gateway connection shared key for passed virtual
        network gateway connection in the specified resource group through
        Network resource provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The virtual network
         gateway connection reset shared key Name.
        :type virtual_network_gateway_connection_name: str
        :param key_length: The virtual network connection reset shared key
         length
        :type key_length: long
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`ConnectionResetSharedKey
         <azure.mgmt.network.v2015_06_15.models.ConnectionResetSharedKey>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the scalar argument in the generated body model.
        parameters = models.ConnectionResetSharedKey(key_length=key_length)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers (custom_headers may override the defaults)
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ConnectionResetSharedKey')

        # Construct and send request
        def long_running_send():
            # Initial POST that starts the long-running operation.
            request = self._client.post(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 carries the reset-key payload; 202 means still accepted.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('ConnectionResetSharedKey', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True skips polling: send once and return the direct result.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def set_shared_key(
            self, resource_group_name, virtual_network_gateway_connection_name, value=None, custom_headers=None, raw=False, **operation_config):
        """The Put VirtualNetworkGatewayConnectionSharedKey operation sets the
        virtual network gateway connection shared key for passed virtual
        network gateway connection in the specified resource group through
        Network resource provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_gateway_connection_name: The virtual network
         gateway connection name.
        :type virtual_network_gateway_connection_name: str
        :param value: The virtual network connection shared key value
        :type value: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`ConnectionSharedKey
         <azure.mgmt.network.v2015_06_15.models.ConnectionSharedKey>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the plain string value in the serializable model type.
        parameters = models.ConnectionSharedKey(value=value)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'ConnectionSharedKey')

        # Construct and send request.
        # The three closures below implement the long-running-operation (LRO)
        # protocol: issue the initial PUT, poll the status link, and
        # deserialize the terminal response.
        def long_running_send():
            # Initial PUT that kicks off the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the Azure-provided status link until the operation settles.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 201 (created) and 200 (ok) are the only success codes here;
            # anything else is surfaced as a CloudError.
            if response.status_code not in [201, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 201:
                deserialized = self._deserialize('ConnectionSharedKey', response)
            if response.status_code == 200:
                deserialized = self._deserialize('ConnectionSharedKey', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True bypasses polling entirely and returns the first response.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| SUSE/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/virtual_network_gateway_connections_operations.py | Python | mit | 28,692 |
import boto3
from decEncoder import *
from DynamoTable import *
def handler(event, context):
    """Lambda handler function for /buildings/{buildingId} API call

    Returns building with the buildingId specified in the path
    or 'Building not found' error if buildingId not found by query search
    """
    # NOTE: this docstring previously sat as a dead string expression after
    # the function while the body started with a misplaced "Dynamo resource"
    # string; it now documents the handler where tooling can find it.
    # 'Buildings' is the DynamoDB table the lookup runs against.
    buildingTable = DynamoTable('Buildings')
    return getBuildingId(event, buildingTable)
def getBuildingId(event, buildingTable):
    """Look up a single building by the ``buildingId`` path parameter.

    Returns an API-Gateway-style response dict: 200 with the item as JSON,
    404 when no item matches, 400 when the path parameter block is missing.
    """
    path_params = event.get('pathParameters')
    if not path_params:
        # Request arrived without any path parameters.
        return {
            'statusCode': 400,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'error': 'Path not found'})
        }

    response = buildingTable.get(buildingId=path_params.get('buildingId'))
    item = response.get('Item')
    if not item:
        # Query ran but no building carries that id.
        return {
            'statusCode': 404,
            'headers': {'Content-Type': 'application/json'},
            'body': json.dumps({'error': 'Building not found'})
        }

    # Found: serialize the item (DecimalEncoder handles DynamoDB numbers).
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(item, cls=DecimalEncoder)
    }
| jcolekaplan/WNCYC | src/main/api/getBuildingId.py | Python | mit | 1,479 |
import hashlib
from datetime import datetime, timedelta
import struct
import argparse
def get_seed(seq_nr, date):
    """Return the hex MD5 seed for a sequence number and a date.

    The digest mixes the little-endian packed sequence number and the
    date fields, interleaved with a fixed 4-byte key.
    """
    salt = "\x01\x05\x19\x35".encode('latin1')
    digest = hashlib.md5()
    digest.update(struct.pack('<I', seq_nr))
    digest.update(struct.pack('<H', date.year))
    digest.update(salt)
    digest.update(struct.pack('<H', date.month))
    digest.update(salt)
    digest.update(struct.pack('<H', date.day))
    digest.update(salt)
    return digest.hexdigest()
def create_domain(seq_nr, date):
    """Build the algorithmically generated domain for *seq_nr* on *date*.

    The 128-bit MD5 seed is consumed as four little-endian 32-bit words;
    each word contributes up to seven base-36 characters. The TLD is
    chosen from the sequence number (mod 4, then 3, then 2).
    """
    charset = "0123456789abcdefghijklmnopqrstuvwxyz"

    def encode_word(value):
        # Base-36 digits, most significant first, at most seven characters.
        digits = []
        for _ in range(7):
            value, remainder = divmod(value, 36)
            digits.append(charset[remainder])
            if value == 0:
                break
        return ''.join(reversed(digits))

    def word_at(hex_chunk):
        # Interpret 8 hex characters as a little-endian 32-bit integer by
        # reversing the byte order before parsing.
        swapped = hex_chunk[6:8] + hex_chunk[4:6] + hex_chunk[2:4] + hex_chunk[0:2]
        return int(swapped, 16)

    seed_hex = get_seed(seq_nr, date)
    domain = ''.join(encode_word(word_at(seed_hex[offset:offset + 8]))
                     for offset in range(0, 32, 8))

    if seq_nr % 4 == 0:
        return domain + ".com"
    if seq_nr % 3 == 0:
        return domain + ".org"
    if seq_nr % 2 == 0:
        return domain + ".biz"
    return domain + ".net"
if __name__=="__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--date", help="date for which to generate domains")
    parser.add_argument("-u", "--url", help="search this url in past domains")
    parser.add_argument("-n", "--nr", help="nr of domains to generate")
    args = parser.parse_args()
    # Default to today when no --date is given.
    if args.date:
        d = datetime.strptime(args.date, "%Y-%m-%d")
    else:
        d = datetime.today()
    # Default to 1000 generated domains when no --nr is given.
    if args.nr:
        nr_of_domains = int(args.nr)
    else:
        nr_of_domains = 1000
    if args.url:
        # Search mode: walk backwards one day at a time, generating the first
        # 1000 sequence numbers for each day, until the URL matches.
        # NOTE(review): this loops forever if the URL is never generated.
        while True:
            print("searching in {}".format(d.strftime("%Y-%m-%d")))
            for seq_nr in range(1000):
                domain = create_domain(seq_nr, d)
                if domain == args.url:
                    print("\nfound it, domain nr {} at {}".format(seq_nr,
                                                                  d.strftime("%Y-%m-%d")))
                    break
            # Re-check the last generated domain to propagate the inner break
            # out of the while loop.
            if domain == args.url:
                break
            d = d - timedelta(days=1)
    else:
        # Generation mode: print the first nr_of_domains domains for the date.
        for seq_nr in range(nr_of_domains):
            domain = create_domain(seq_nr, d)
            print(domain)
| baderj/domain_generation_algorithms | newgoz/dga.py | Python | gpl-2.0 | 2,689 |
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
import os.path
from UM.Application import Application
from UM.Math.Color import Color
from UM.PluginRegistry import PluginRegistry
from UM.Event import Event
from UM.View.View import View
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.View.RenderBatch import RenderBatch
from UM.View.GL.OpenGL import OpenGL
from . import XRayPass
## View used to display a see-through version of objects with errors highlighted.
class XRayView(View):
    """View that draws scene meshes with an additive "x-ray" shader and
    composites the result with a shader that receives error/outline colors.
    """

    def __init__(self):
        super().__init__()

        # GL resources are created lazily: shaders and render passes need an
        # active OpenGL context, which does not exist at construction time.
        self._xray_shader = None
        self._xray_pass = None
        self._xray_composite_shader = None
        # The application's regular composite pass; its shader and layer
        # bindings are swapped while this view is active and restored on
        # deactivation (the _old_* attributes hold the originals).
        self._composite_pass = None
        self._old_composite_shader = None
        self._old_layer_bindings = None

    def beginRendering(self):
        """Queue every visible mesh node for rendering with the x-ray shader."""
        scene = self.getController().getScene()
        renderer = self.getRenderer()

        if not self._xray_shader:
            # First use: compile the shader and feed it the theme's x-ray color.
            self._xray_shader = OpenGL.getInstance().createShaderProgram(os.path.join(PluginRegistry.getInstance().getPluginPath("XRayView"), "xray.shader"))
            self._xray_shader.setUniformValue("u_color", Color(*Application.getInstance().getTheme().getColor("xray").getRgb()))

        for node in BreadthFirstIterator(scene.getRoot()):
            # node.render() returning False means the node does not draw
            # itself; only such plain, visible mesh nodes are queued here.
            if not node.render(renderer):
                if node.getMeshData() and node.isVisible():
                    # Additive blending accumulates overlapping surfaces; the
                    # depth test is forced to pass (GL_ALWAYS) for the draw and
                    # restored to GL_LESS afterwards.
                    renderer.queueNode(node,
                                       shader = self._xray_shader,
                                       type = RenderBatch.RenderType.Solid,
                                       blend_mode = RenderBatch.BlendMode.Additive,
                                       sort = -10,
                                       state_setup_callback = lambda gl: gl.glDepthFunc(gl.GL_ALWAYS),
                                       state_teardown_callback = lambda gl: gl.glDepthFunc(gl.GL_LESS)
                                       )

    def endRendering(self):
        """No per-frame cleanup needed; GL state is restored per batch."""
        pass

    def event(self, event):
        """Install the x-ray passes on view activation; restore the previous
        composite configuration on deactivation.
        """
        if event.type == Event.ViewActivateEvent:
            if not self._xray_pass:
                # Currently the RenderPass constructor requires a size > 0
                # This should be fixed in RenderPass's constructor.
                self._xray_pass = XRayPass.XRayPass(1, 1)
                self.getRenderer().addRenderPass(self._xray_pass)

            if not self._xray_composite_shader:
                self._xray_composite_shader = OpenGL.getInstance().createShaderProgram(os.path.join(PluginRegistry.getInstance().getPluginPath("XRayView"), "xray_composite.shader"))
                theme = Application.getInstance().getTheme()
                self._xray_composite_shader.setUniformValue("u_background_color", Color(*theme.getColor("viewport_background").getRgb()))
                self._xray_composite_shader.setUniformValue("u_error_color", Color(*theme.getColor("xray_error").getRgb()))
                self._xray_composite_shader.setUniformValue("u_outline_color", Color(*theme.getColor("model_selection_outline").getRgb()))

            if not self._composite_pass:
                self._composite_pass = self.getRenderer().getRenderPass("composite")

            # Remember the normal compositing setup so deactivation can
            # restore it, then splice the xray layer in.
            self._old_layer_bindings = self._composite_pass.getLayerBindings()
            self._composite_pass.setLayerBindings(["default", "selection", "xray"])
            self._old_composite_shader = self._composite_pass.getCompositeShader()
            self._composite_pass.setCompositeShader(self._xray_composite_shader)

        if event.type == Event.ViewDeactivateEvent:
            # NOTE(review): assumes an activate event always preceded this;
            # otherwise self._composite_pass is still None — confirm with the
            # view controller's event ordering.
            self._composite_pass.setLayerBindings(self._old_layer_bindings)
            self._composite_pass.setCompositeShader(self._old_composite_shader)
| ynotstartups/Wanhao | plugins/XRayView/XRayView.py | Python | agpl-3.0 | 3,779 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-12 22:20
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7 (do not edit by hand): re-declares the
    # AutoSlugField options on the slug fields so slugs always update from
    # display_name (always_update=True).

    dependencies = [
        # Must be applied after the previous semesterpage migration.
        ('semesterpage', '0008_auto_20160812_2305'),
    ]

    operations = [
        # Main profile slugs are unique per study_program...
        migrations.AlterField(
            model_name='mainprofile',
            name='slug',
            field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='display_name', unique_with=('study_program',)),
        ),
        # ...while study program slugs are globally unique.
        migrations.AlterField(
            model_name='studyprogram',
            name='slug',
            field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='display_name', unique=True),
        ),
    ]
| afriestad/WikiLinks | semesterpage/migrations/0009_auto_20160813_0020.py | Python | mit | 803 |
# -*- coding: utf-8 -*-
# utils.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utils to help in the setup process
"""
import os
import re
import sys
def is_develop_mode():
    """
    Returns True if we're calling the setup script using the argument for
    setuptools development mode.

    This avoids messing up with dependency pinning and order, the
    responsibility of installing the leap dependencies is left to the
    developer.
    """
    # Slicing avoids the IndexError the previous args[0]/args[1] access
    # raised when the module was imported with fewer than two CLI
    # arguments (e.g. from an interactive session or a bare invocation).
    return tuple(sys.argv[:2]) == ("setup.py", "develop")
def get_reqs_from_files(reqfiles):
    """
    Returns the contents of the first existing requirement file listed,
    as a list of lines.

    @param reqfiles: requirement files to parse
    @type reqfiles: list of str
    @return: the lines of the first existing file, or an empty list when
             none of the candidate paths exists (previously ``None`` was
             returned implicitly, which crashed callers that iterate over
             the result).
    """
    for reqfile in reqfiles:
        if os.path.isfile(reqfile):
            # Close the handle deterministically instead of leaking it
            # until garbage collection.
            with open(reqfile, 'r') as f:
                return f.read().split('\n')
    return []
def parse_requirements(reqfiles=['requirements.txt',
                                 'requirements.pip',
                                 'pkg/requirements.pip']):
    """
    Parses the requirement files provided.

    The passed reqfiles list is a list of possible locations to try, the
    function will return the contents of the first path found.

    Checks the value of LEAP_VENV_SKIP_PYSIDE to see if it should
    return PySide as a dep or not. Don't set, or set to 0 if you want
    to install it through pip.

    @param reqfiles: requirement files to parse
    @type reqfiles: list of str
    """
    skip_pyside = os.getenv("LEAP_VENV_SKIP_PYSIDE", "0") != "0"
    requirements = []

    for line in get_reqs_from_files(reqfiles):
        # -e git://foo.bar/baz/master#egg=foobar
        # editable/vcs externals are deliberately left out
        if re.match(r'\s*-e\s+', line):
            continue
        # http://foo.bar/baz/foobar/zipball/master#egg=foobar
        if re.match(r'\s*https?:', line):
            requirements.append(re.sub(r'\s*https?:.*#egg=(.*)$', r'\1',
                                line))
            continue
        # -f lines are for index locations, and don't get used here
        if re.match(r'\s*-f\s+', line):
            continue
        # argparse is part of the standard library starting with 2.7;
        # adding it to the requirements list screws distro installs
        if line == 'argparse' and sys.version_info >= (2, 7):
            continue
        if line == 'PySide' and skip_pyside:
            continue
        # comments are not requirements
        if line.lstrip().startswith('#'):
            continue
        if line != '':
            requirements.append(line)

    return requirements
| leapcode/leap_mail | pkg/utils.py | Python | gpl-3.0 | 3,401 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from numpy import array
import warnings
from pyspark import RDD, since
from pyspark.streaming.dstream import DStream
from pyspark.mllib.common import callMLlibFunc, _py2java, _java2py, inherit_doc
from pyspark.mllib.linalg import SparseVector, Vectors, _convert_to_vector
from pyspark.mllib.util import Saveable, Loader
__all__ = ['LabeledPoint', 'LinearModel',
'LinearRegressionModel', 'LinearRegressionWithSGD',
'RidgeRegressionModel', 'RidgeRegressionWithSGD',
'LassoModel', 'LassoWithSGD', 'IsotonicRegressionModel',
'IsotonicRegression', 'StreamingLinearAlgorithm',
'StreamingLinearRegressionWithSGD']
class LabeledPoint(object):

    """
    The label and the feature vector of a single data point.

    :param label:
      Label for this data point.
    :param features:
      Vector of features for this point (NumPy array, list,
      pyspark.mllib.linalg.SparseVector, or scipy.sparse column matrix).

    Note: 'label' and 'features' are accessible as class attributes.

    .. versionadded:: 1.0.0
    """

    def __init__(self, label, features):
        # Normalize the inputs once so downstream code can rely on the types.
        self.label = float(label)
        self.features = _convert_to_vector(features)

    def __reduce__(self):
        # Pickle as (constructor, args) so unpickling re-normalizes.
        return LabeledPoint, (self.label, self.features)

    def __str__(self):
        return "(%s,%s)" % (self.label, self.features)

    def __repr__(self):
        return "LabeledPoint(%s, %s)" % (self.label, self.features)
class LinearModel(object):

    """
    A linear model that has a vector of coefficients and an intercept.

    :param weights:
      Weights computed for every feature.
    :param intercept:
      Intercept computed for this model.

    .. versionadded:: 0.9.0
    """

    def __init__(self, weights, intercept):
        # _coeff and _intercept are read directly by subclasses (e.g. the
        # save() methods below), so these private names are an internal
        # contract — do not rename.
        self._coeff = _convert_to_vector(weights)
        self._intercept = float(intercept)

    @property
    @since("1.0.0")
    def weights(self):
        """Weights computed for every feature."""
        return self._coeff

    @property
    @since("1.0.0")
    def intercept(self):
        """Intercept computed for this model."""
        return self._intercept

    def __repr__(self):
        return "(weights=%s, intercept=%r)" % (self._coeff, self._intercept)
@inherit_doc
class LinearRegressionModelBase(LinearModel):

    """A linear regression model.

    >>> lrmb = LinearRegressionModelBase(np.array([1.0, 2.0]), 0.1)
    >>> abs(lrmb.predict(np.array([-1.03, 7.777])) - 14.624) < 1e-6
    True
    >>> abs(lrmb.predict(SparseVector(2, {0: -1.03, 1: 7.777})) - 14.624) < 1e-6
    True

    .. versionadded:: 0.9.0
    """

    @since("0.9.0")
    def predict(self, x):
        """
        Predict the value of the dependent variable given a vector or
        an RDD of vectors containing values for the independent variables.
        """
        # An RDD input is handled element-wise by mapping predict over it.
        if isinstance(x, RDD):
            return x.map(self.predict)
        x = _convert_to_vector(x)
        # Plain dot product plus bias: w . x + b
        return self.weights.dot(x) + self.intercept
@inherit_doc
class LinearRegressionModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=np.array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LinearRegressionModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=array([1.0]))
    >>> abs(lrm.predict(array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     miniBatchFraction=1.0, initialWeights=array([1.0]), regParam=0.1, regType="l2",
    ...     intercept=True, validateData=True)
    >>> abs(lrm.predict(array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """

    @since("1.4.0")
    def save(self, sc, path):
        """Save a LinearRegressionModel."""
        # Persist through the JVM model so the on-disk format matches the
        # one written by the Scala API.
        java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a LinearRegressionModel."""
        # Load the JVM model, then copy weights and intercept back into a
        # Python-side model instance.
        java_model = sc._jvm.org.apache.spark.mllib.regression.LinearRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        model = LinearRegressionModel(weights, intercept)
        return model
# train_func should take two parameters, namely data and initial_weights, and
# return the result of a call to the appropriate JVM stub.
# _regression_train_wrapper is responsible for setup and error checking.
def _regression_train_wrapper(train_func, modelClass, data, initial_weights):
    """
    Validate *data*, default the initial weights, run *train_func* and
    wrap the trained coefficients in *modelClass*.

    :param train_func: callable taking (data, initial_weights) that calls
        the JVM training stub.
    :param modelClass: model class instantiated with the trained weights.
    :param data: training data, an RDD of LabeledPoint.
    :param initial_weights: starting weights, or None for all zeros.
    :raises TypeError: if the RDD rows are not LabeledPoint instances.
    """
    from pyspark.mllib.classification import LogisticRegressionModel
    first = data.first()
    if not isinstance(first, LabeledPoint):
        raise TypeError("data should be an RDD of LabeledPoint, but got %s" % type(first))
    if initial_weights is None:
        # Reuse the row fetched above instead of calling data.first() again,
        # which would trigger a second Spark action.
        initial_weights = [0.0] * len(first.features)
    if (modelClass == LogisticRegressionModel):
        # Logistic models carry extra metadata (feature/class counts).
        weights, intercept, numFeatures, numClasses = train_func(
            data, _convert_to_vector(initial_weights))
        return modelClass(weights, intercept, numFeatures, numClasses)
    else:
        weights, intercept = train_func(data, _convert_to_vector(initial_weights))
        return modelClass(weights, intercept)
class LinearRegressionWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0,
              initialWeights=None, regParam=0.0, regType=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a linear regression model using Stochastic Gradient
        Descent (SGD). This solves the least squares regression
        formulation

            f(weights) = 1/(2n) ||A weights - y||^2

        which is the mean squared error. Here the data matrix has n rows,
        and the input RDD holds the set of rows of A, each with its
        corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param regParam:
          The regularizer parameter.
          (default: 0.0)
        :param regType:
          The type of regularizer used for training our model.
          Supported values:

            - "l1" for using L1 regularization
            - "l2" for using L2 regularization
            - None for no regularization (default)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e., whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn("Deprecated in 2.0.0. Use ml.regression.LinearRegression.")

        def _train_jvm(rdd, start_weights):
            # Delegate the actual optimization to the JVM implementation.
            return callMLlibFunc(
                "trainLinearRegressionModelWithSGD", rdd, int(iterations),
                float(step), float(miniBatchFraction), start_weights,
                float(regParam), regType, bool(intercept),
                bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_train_jvm, LinearRegressionModel,
                                         data, initialWeights)
@inherit_doc
class LassoModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_1 penalty term.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = LassoWithSGD.train(sc.parallelize(data), iterations=10, initialWeights=array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = LassoModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = LassoWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """

    @since("1.4.0")
    def save(self, sc, path):
        """Save a LassoModel."""
        # Persist through the JVM model so the on-disk format matches the
        # one written by the Scala API.
        java_model = sc._jvm.org.apache.spark.mllib.regression.LassoModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a LassoModel."""
        # Load the JVM model, then copy weights and intercept back into a
        # Python-side model instance.
        java_model = sc._jvm.org.apache.spark.mllib.regression.LassoModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        model = LassoModel(weights, intercept)
        return model
class LassoWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 1.0.
        Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a regression model with L1-regularization using Stochastic
        Gradient Descent. This solves the l1-regularized least squares
        regression formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam ||weights||_1

        Here the data matrix has n rows, and the input RDD holds the set
        of rows of A, each with its corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 1.0. "
            "Note the default regParam is 0.01 for LassoWithSGD, but is 0.0 for LinearRegression.")

        def _train_jvm(rdd, start_weights):
            # Delegate the actual optimization to the JVM implementation.
            return callMLlibFunc(
                "trainLassoModelWithSGD", rdd, int(iterations), float(step),
                float(regParam), float(miniBatchFraction), start_weights,
                bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_train_jvm, LassoModel, data,
                                         initialWeights)
@inherit_doc
class RidgeRegressionModel(LinearRegressionModelBase):

    """A linear regression model derived from a least-squares fit with
    an l_2 penalty term.

    >>> from pyspark.mllib.regression import LabeledPoint
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(1.0, [1.0]),
    ...     LabeledPoint(3.0, [2.0]),
    ...     LabeledPoint(2.0, [3.0])
    ... ]
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> abs(lrm.predict(sc.parallelize([[1.0]])).collect()[0] - 1) < 0.5
    True
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> lrm.save(sc, path)
    >>> sameModel = RidgeRegressionModel.load(sc, path)
    >>> abs(sameModel.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(sameModel.predict(np.array([1.0])) - 1) < 0.5
    True
    >>> abs(sameModel.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except:
    ...     pass
    >>> data = [
    ...     LabeledPoint(0.0, SparseVector(1, {0: 0.0})),
    ...     LabeledPoint(1.0, SparseVector(1, {0: 1.0})),
    ...     LabeledPoint(3.0, SparseVector(1, {0: 2.0})),
    ...     LabeledPoint(2.0, SparseVector(1, {0: 3.0}))
    ... ]
    >>> lrm = LinearRegressionWithSGD.train(sc.parallelize(data), iterations=10,
    ...     initialWeights=array([1.0]))
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True
    >>> lrm = RidgeRegressionWithSGD.train(sc.parallelize(data), iterations=10, step=1.0,
    ...     regParam=0.01, miniBatchFraction=1.0, initialWeights=array([1.0]), intercept=True,
    ...     validateData=True)
    >>> abs(lrm.predict(np.array([0.0])) - 0) < 0.5
    True
    >>> abs(lrm.predict(SparseVector(1, {0: 1.0})) - 1) < 0.5
    True

    .. versionadded:: 0.9.0
    """

    @since("1.4.0")
    def save(self, sc, path):
        """Save a RidgeRegressionMode."""
        # Persist through the JVM model so the on-disk format matches the
        # one written by the Scala API.
        java_model = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel(
            _py2java(sc, self._coeff), self.intercept)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load a RidgeRegressionMode."""
        # Load the JVM model, then copy weights and intercept back into a
        # Python-side model instance.
        java_model = sc._jvm.org.apache.spark.mllib.regression.RidgeRegressionModel.load(
            sc._jsc.sc(), path)
        weights = _java2py(sc, java_model.weights())
        intercept = java_model.intercept()
        model = RidgeRegressionModel(weights, intercept)
        return model
class RidgeRegressionWithSGD(object):
    """
    .. versionadded:: 0.9.0
    .. note:: Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 0.0.
        Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for
        LinearRegression.
    """
    @classmethod
    @since("0.9.0")
    def train(cls, data, iterations=100, step=1.0, regParam=0.01,
              miniBatchFraction=1.0, initialWeights=None, intercept=False,
              validateData=True, convergenceTol=0.001):
        """
        Train a regression model with L2-regularization using Stochastic
        Gradient Descent. This solves the l2-regularized least squares
        regression formulation

            f(weights) = 1/(2n) ||A weights - y||^2 + regParam/2 ||weights||^2

        Here the data matrix has n rows, and the input RDD holds the set
        of rows of A, each with its corresponding right hand side label y.
        See also the documentation for the precise formulation.

        :param data:
          The training data, an RDD of LabeledPoint.
        :param iterations:
          The number of iterations.
          (default: 100)
        :param step:
          The step parameter used in SGD.
          (default: 1.0)
        :param regParam:
          The regularizer parameter.
          (default: 0.01)
        :param miniBatchFraction:
          Fraction of data to be used for each SGD iteration.
          (default: 1.0)
        :param initialWeights:
          The initial weights.
          (default: None)
        :param intercept:
          Boolean parameter which indicates the use or not of the
          augmented representation for training data (i.e. whether bias
          features are activated or not).
          (default: False)
        :param validateData:
          Boolean parameter which indicates if the algorithm should
          validate data before training.
          (default: True)
        :param convergenceTol:
          A condition which decides iteration termination.
          (default: 0.001)
        """
        warnings.warn(
            "Deprecated in 2.0.0. Use ml.regression.LinearRegression with elasticNetParam = 0.0. "
            "Note the default regParam is 0.01 for RidgeRegressionWithSGD, but is 0.0 for "
            "LinearRegression.")

        def _train_jvm(rdd, start_weights):
            # Delegate the actual optimization to the JVM implementation.
            return callMLlibFunc(
                "trainRidgeModelWithSGD", rdd, int(iterations), float(step),
                float(regParam), float(miniBatchFraction), start_weights,
                bool(intercept), bool(validateData), float(convergenceTol))

        return _regression_train_wrapper(_train_jvm, RidgeRegressionModel,
                                         data, initialWeights)
class IsotonicRegressionModel(Saveable, Loader):
    """
    Regression model for isotonic regression.

    :param boundaries:
      Array of boundaries for which predictions are known. Boundaries
      must be sorted in increasing order.
    :param predictions:
      Array of predictions associated to the boundaries at the same
      index. Results of isotonic regression and therefore monotone.
    :param isotonic:
      Indicates whether this is isotonic or antitonic.

    >>> data = [(1, 0, 1), (2, 1, 1), (3, 2, 1), (1, 3, 1), (6, 4, 1), (17, 5, 1), (16, 6, 1)]
    >>> irm = IsotonicRegression.train(sc.parallelize(data))
    >>> irm.predict(3)
    2.0
    >>> irm.predict(5)
    16.5
    >>> irm.predict(sc.parallelize([3, 5])).collect()
    [2.0, 16.5]
    >>> import os, tempfile
    >>> path = tempfile.mkdtemp()
    >>> irm.save(sc, path)
    >>> sameModel = IsotonicRegressionModel.load(sc, path)
    >>> sameModel.predict(3)
    2.0
    >>> sameModel.predict(5)
    16.5
    >>> from shutil import rmtree
    >>> try:
    ...     rmtree(path)
    ... except OSError:
    ...     pass

    .. versionadded:: 1.4.0
    """
    def __init__(self, boundaries, predictions, isotonic):
        # boundaries: sorted feature values; predictions: fitted values at
        # those boundaries; isotonic: True for a non-decreasing fit.
        self.boundaries = boundaries
        self.predictions = predictions
        self.isotonic = isotonic

    @since("1.4.0")
    def predict(self, x):
        """
        Predict labels for provided features.
        Using a piecewise linear function.
        1) If x exactly matches a boundary then associated prediction
        is returned. In case there are multiple predictions with the
        same boundary then one of them is returned. Which one is
        undefined (same as java.util.Arrays.binarySearch).
        2) If x is lower or higher than all boundaries then first or
        last prediction is returned respectively. In case there are
        multiple predictions with the same boundary then the lowest
        or highest is returned respectively.
        3) If x falls between two values in boundary array then
        prediction is treated as piecewise linear function and
        interpolated value is returned. In case there are multiple
        values with the same boundary then the same rules as in 2)
        are used.

        :param x:
          Feature or RDD of Features to be labeled.
        """
        # An RDD is handled by mapping each element back through predict.
        if isinstance(x, RDD):
            return x.map(lambda v: self.predict(v))
        # Scalar case: piecewise-linear interpolation on the boundary grid.
        return np.interp(x, self.boundaries, self.predictions)

    @since("1.4.0")
    def save(self, sc, path):
        """Save an IsotonicRegressionModel."""
        # Persistence is delegated to the JVM model implementation.
        java_boundaries = _py2java(sc, self.boundaries.tolist())
        java_predictions = _py2java(sc, self.predictions.tolist())
        java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel(
            java_boundaries, java_predictions, self.isotonic)
        java_model.save(sc._jsc.sc(), path)

    @classmethod
    @since("1.4.0")
    def load(cls, sc, path):
        """Load an IsotonicRegressionModel."""
        java_model = sc._jvm.org.apache.spark.mllib.regression.IsotonicRegressionModel.load(
            sc._jsc.sc(), path)
        # Convert the JVM vectors back into numpy arrays for the wrapper.
        py_boundaries = _java2py(sc, java_model.boundaryVector()).toArray()
        py_predictions = _java2py(sc, java_model.predictionVector()).toArray()
        return IsotonicRegressionModel(py_boundaries, py_predictions, java_model.isotonic)
class IsotonicRegression(object):
    """
    Isotonic regression.
    Currently implemented using parallelized pool adjacent violators
    algorithm. Only univariate (single feature) algorithm supported.

    Sequential PAV implementation based on:
    Tibshirani, Ryan J., Holger Hoefling, and Robert Tibshirani.
    "Nearly-isotonic regression." Technometrics 53.1 (2011): 54-61.
    Available from http://www.stat.cmu.edu/~ryantibs/papers/neariso.pdf

    Sequential PAV parallelization based on:
    Kearsley, Anthony J., Richard A. Tapia, and Michael W. Trosset.
    "An approach to parallelizing isotonic regression."
    Applied Mathematics and Parallel Computing. Physica-Verlag HD, 1996. 141-147.
    Available from http://softlib.rice.edu/pub/CRPC-TRs/reports/CRPC-TR96640.pdf

    See `Isotonic regression (Wikipedia) <http://en.wikipedia.org/wiki/Isotonic_regression>`_.

    .. versionadded:: 1.4.0
    """
    @classmethod
    @since("1.4.0")
    def train(cls, data, isotonic=True):
        """
        Train an isotonic regression model on the given data.

        :param data: RDD of (label, feature, weight) tuples.
        :param isotonic: Whether this is isotonic (which is default) or
            antitonic. (default: True)
        """
        vectorized = data.map(_convert_to_vector)
        boundaries, predictions = callMLlibFunc(
            "trainIsotonicRegressionModel", vectorized, bool(isotonic))
        return IsotonicRegressionModel(
            boundaries.toArray(), predictions.toArray(), isotonic)
class StreamingLinearAlgorithm(object):
    """
    Common base for streaming linear algorithms. Subclasses inherit the
    shared predictOn / predictOnValues implementations so they only have
    to provide training.

    .. versionadded:: 1.5.0
    """
    def __init__(self, model):
        self._model = model

    @since("1.5.0")
    def latestModel(self):
        """Return the most recently trained model."""
        return self._model

    def _validate(self, dstream):
        # Reject non-DStream inputs and uninitialised models up front.
        if not isinstance(dstream, DStream):
            raise TypeError(
                "dstream should be a DStream object, got %s" % type(dstream))
        if not self._model:
            raise ValueError(
                "Model must be intialized using setInitialWeights")

    @since("1.5.0")
    def predictOn(self, dstream):
        """
        Apply the current model to each batch of data arriving on *dstream*.

        :return: DStream containing predictions.
        """
        self._validate(dstream)
        return dstream.map(lambda features: self._model.predict(features))

    @since("1.5.0")
    def predictOnValues(self, dstream):
        """
        Apply the current model to the values of a keyed *dstream*,
        carrying the keys through unchanged.

        :return: DStream containing the input keys and the predictions as values.
        """
        self._validate(dstream)
        return dstream.mapValues(lambda features: self._model.predict(features))
@inherit_doc
class StreamingLinearRegressionWithSGD(StreamingLinearAlgorithm):
    """
    Train or predict a linear regression model on streaming data.
    Each incoming batch (an RDD of LabeledPoints from a DStream) updates
    the model via Stochastic Gradient Descent (see `LinearRegressionWithSGD`
    for the model equation). Batches may vary in size but must share a
    fixed number of features; an initial weight vector must be provided
    before training or prediction.

    :param stepSize: Step size for each iteration of gradient descent.
        (default: 0.1)
    :param numIterations: Number of iterations run for each batch of data.
        (default: 50)
    :param miniBatchFraction: Fraction of each batch of data to use for
        updates. (default: 1.0)
    :param convergenceTol: Value used to determine when to terminate
        iterations. (default: 0.001)

    .. versionadded:: 1.5.0
    """
    def __init__(self, stepSize=0.1, numIterations=50, miniBatchFraction=1.0, convergenceTol=0.001):
        self.stepSize = stepSize
        self.numIterations = numIterations
        self.miniBatchFraction = miniBatchFraction
        self.convergenceTol = convergenceTol
        # No model until setInitialWeights is called.
        self._model = None
        super(StreamingLinearRegressionWithSGD, self).__init__(
            model=self._model)

    @since("1.5.0")
    def setInitialWeights(self, initialWeights):
        """
        Set the initial value of weights.
        This must be set before running trainOn and predictOn
        """
        initialWeights = _convert_to_vector(initialWeights)
        self._model = LinearRegressionModel(initialWeights, 0)
        return self

    @since("1.5.0")
    def trainOn(self, dstream):
        """Train the model on the incoming dstream."""
        self._validate(dstream)

        def _update_model(rdd):
            # LinearRegressionWithSGD.train raises an error for an empty RDD,
            # so skip empty batches entirely.
            if not rdd.isEmpty():
                self._model = LinearRegressionWithSGD.train(
                    rdd, self.numIterations, self.stepSize,
                    self.miniBatchFraction, self._model.weights,
                    intercept=self._model.intercept, convergenceTol=self.convergenceTol)

        dstream.foreachRDD(_update_model)
def _test():
    """Run this module's doctests against a local two-core SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.regression
    test_globals = pyspark.mllib.regression.__dict__.copy()
    session = (SparkSession.builder
               .master("local[2]")
               .appName("mllib.regression tests")
               .getOrCreate())
    test_globals['sc'] = session.sparkContext
    failure_count, test_count = doctest.testmod(
        globs=test_globals, optionflags=doctest.ELLIPSIS)
    session.stop()
    if failure_count:
        exit(-1)
if __name__ == "__main__":
_test()
| TK-TarunW/ecosystem | spark-2.0.2-bin-hadoop2.7/python/pyspark/mllib/regression.py | Python | apache-2.0 | 30,567 |
import Adafruit_DHT
from datetime import datetime
from elasticsearch import Elasticsearch
import RPi.GPIO as GPIO
import sys
import util
DEVICE = 'raspberrypi1'  # logical name of this sensor node
PIN = 20  # BCM pin of the DHT22 data line (not used in this setup script)
SENSOR = Adafruit_DHT.DHT22  # sensor type (not used in this setup script)
BLUE_LED = 19  # BCM pin of the blue status LED
GREEN_LED = 26  # BCM pin of the green "busy" LED
GPIO.setwarnings(False)  # silence channel-in-use warnings on repeated runs
GPIO.setmode(GPIO.BCM)  # address pins by Broadcom (BCM) numbering
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(BLUE_LED, GPIO.OUT)
es = None  # Elasticsearch client; initialised in the __main__ block below
def get_temperature_mapping_body():
    """Build the Elasticsearch mapping body for the ``temperature`` doc type."""
    return {
        '_timestamp': {
            'enabled': True,
            'store': True
        },
        'properties': {
            'device': {'type': 'string'},
            'place': {'type': 'string'},
            'temperature': {'type': 'double'},
            'timestamp': {'type': 'long'}
        }
    }
def get_humidity_mapping_body():
    """Build the Elasticsearch mapping body for the ``humidity`` doc type."""
    return {
        '_timestamp': {
            'enabled': True,
            'store': True
        },
        'properties': {
            'device': {'type': 'string'},
            'place': {'type': 'string'},
            'humidity': {'type': 'double'},
            'timestamp': {'type': 'long'}
        }
    }
def create(index, doc, mapping):
    """Install *mapping* for doc type *doc* on *index* using the global ``es``.

    Requires the module-level Elasticsearch client ``es`` to be initialised
    (done in the __main__ block).
    """
    util.put_mapping(es, index, doc, mapping)
    # NOTE(review): presumably echoes the stored mapping back for
    # verification -- confirm against util.get_mapping.
    util.get_mapping(es, index, doc)
if __name__ == '__main__':
    # Light the green LED while the (re)index run is in progress.
    GPIO.output(GREEN_LED, True)
    print(datetime.now())
    host = ['http://192.168.1.195:9200']
    es = Elasticsearch(host)
    index = 'ambience'
    # Drop and recreate the index, then register both doc-type mappings.
    util.delete_index(es, index)
    util.create_index(es, index)
    doc = 'temperature'
    create(index, doc, get_temperature_mapping_body())
    doc = 'humidity'
    create(index, doc, get_humidity_mapping_body())
    GPIO.output(GREEN_LED, False)
    GPIO.cleanup()
| chuajiesheng/raspberrypi | Ambience/create.py | Python | gpl-2.0 | 1,676 |
#!/usr/bin/env python
# barrier.py
import time
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()  # this process's rank within the world communicator
if rank == 0:
    # Rank 0 stalls for 5 s so the barrier's effect is observable:
    # all other ranks must wait at Barrier() until rank 0 arrives.
    time.sleep(5)
comm.Barrier()  # collective: no rank proceeds until every rank reaches here
print ("process " + str(rank) + " is here") | linhbngo/cpsc-4770_6770 | codes/mpi4py/barrier.py | Python | gpl-3.0 | 205 |
from tasty.types import conversions
from tasty.types import *
from tasty.types.driver import TestDriver
# Protocol parameters: bit lengths (la/lb) and dimensions (dima/dimb)
# consumed by the tasty framework.
__params__ = {'la': 32, 'lb': 32, 'dima': 10, 'dimb': 10}
driver = TestDriver()  # test driver used by the tasty framework
def protocol(client, server, params):
    """Receive server's Paillier value ``hb`` as client's garbled ``gb``,
    then decode it into a plain Unsigned on the client side.
    """
    # NOTE(review): bitlen 233 and dim [1] look protocol-specific --
    # confirm they match the bit lengths declared in __params__.
    conversions.Paillier_Garbled_receive(server.hb, client.gb, 233, [1], False)
    client.sc = Unsigned(val=client.gb, signed=False, bitlen=233, dim=[1])
| tastyproject/tasty | tasty/tests/functional/protocols/conversions/unsigned-homomorphic-garbled-unsigned/protocol_setup_client.py | Python | gpl-3.0 | 378 |
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import cbrt
from .config import *
# Largest representable float64; the bounds below are used to clip inputs
# before operations that could overflow.
_lim_val = np.finfo(np.float64).max
_lim_val_exp = np.log(_lim_val)  # exp() of anything above this overflows
_lim_val_square = np.sqrt(_lim_val)  # squaring anything above this overflows
#_lim_val_cube = cbrt(_lim_val)
_lim_val_cube = np.nextafter(_lim_val**(1/3.0), -np.inf)  # largest safe cube argument
_lim_val_quad = np.nextafter(_lim_val**(1/4.0), -np.inf)  # largest safe fourth-power argument
_lim_val_three_times = np.nextafter(_lim_val/3.0, -np.inf)  # largest value safe to triple
def safe_exp(f):
    """Exponential that clips its argument so np.exp cannot overflow.

    Large negative inputs are left alone: they underflow harmlessly to 0.
    """
    return np.exp(np.clip(f, -np.inf, _lim_val_exp))
def safe_square(f):
    """Square that clips *f* so that ``f**2`` cannot overflow float64.

    The clip is symmetric: large *negative* inputs overflow on squaring just
    as large positive ones do (the previous upper-only clip let e.g.
    ``safe_square(-1e300)`` return inf), so both tails are limited to
    +/- ``_lim_val_square``.
    """
    f = np.clip(f, -_lim_val_square, _lim_val_square)
    return f**2
def safe_cube(f):
    """Cube that clips *f* so that ``f**3`` cannot overflow float64.

    The clip is symmetric: an odd power preserves sign, so large negative
    inputs previously overflowed to -inf under the upper-only clip. Both
    tails are now limited to +/- ``_lim_val_cube``.
    """
    f = np.clip(f, -_lim_val_cube, _lim_val_cube)
    return f**3
def safe_quad(f):
    """Fourth power that clips *f* so that ``f**4`` cannot overflow float64.

    The clip is symmetric: large negative inputs overflow on an even power
    just as positive ones do, so both tails are limited to
    +/- ``_lim_val_quad``.
    """
    f = np.clip(f, -_lim_val_quad, _lim_val_quad)
    return f**4
def safe_three_times(f):
    """Triple that clips *f* so that ``3*f`` cannot overflow float64.

    The clip is symmetric: tripling a value below ``-_lim_val/3`` overflows
    to -inf under the previous upper-only clip, so both tails are limited
    to +/- ``_lim_val_three_times``.
    """
    f = np.clip(f, -_lim_val_three_times, _lim_val_three_times)
    return 3*f
def chain_1(df_dg, dg_dx):
    """First-derivative chain rule: d(f.g)/dx = df/dg * dg/dx.

    When dg/dx is identically 1 the multiplication is skipped and df/dg is
    returned unchanged.
    """
    return df_dg if np.all(dg_dx == 1.) else df_dg * dg_dx
def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):
    """Second-derivative chain rule:

    d2(f.g)/dx2 = d2f/dg2 * (dg/dx)^2 + df/dg * d2g/dx2

    The shortcut returns d2f/dg2 directly when dg/dx == 1 and d2g/dx2 == 0.
    dg/dx is clipped before squaring to avoid float64 overflow.
    """
    if np.all(dg_dx == 1.) and np.all(d2g_dx2 == 0):
        return d2f_dg2
    clipped_sq = np.clip(dg_dx, -np.inf, _lim_val_square) ** 2
    return d2f_dg2 * clipped_sq + df_dg * d2g_dx2
def chain_3(d3f_dg3, dg_dx, d2f_dg2, d2g_dx2, df_dg, d3g_dx3):
    """Third-derivative chain rule:

    d3(f.g)/dx3 = d3f/dg3 * (dg/dx)^3 + 3 * d2f/dg2 * dg/dx * d2g/dx2
                  + df/dg * d3g/dx3

    The shortcut returns d3f/dg3 directly for the identity inner function
    (dg/dx == 1, higher inner derivatives zero). dg/dx is clipped before
    cubing to avoid float64 overflow.
    """
    if np.all(dg_dx == 1.) and np.all(d2g_dx2 == 0) and np.all(d3g_dx3 == 0):
        return d3f_dg3
    clipped_cube = np.clip(dg_dx, -np.inf, _lim_val_cube) ** 3
    return d3f_dg3 * clipped_cube + 3 * d2f_dg2 * dg_dx * d2g_dx2 + df_dg * d3g_dx3
def opt_wrapper(m, **kwargs):
    """
    This function just wraps the optimization procedure of a GPy
    object so that optimize() is pickleable (necessary for multiprocessing).

    :param m: model-like object exposing ``optimize`` and
        ``optimization_runs``.
    :returns: the last entry of ``m.optimization_runs`` after optimizing.
    """
    m.optimize(**kwargs)
    return m.optimization_runs[-1]
def linear_grid(D, n=100, min_max=(-100, 100)):
    """Create a D-dimensional grid of n linearly spaced points.

    Every column of the returned (n, D) array holds the same n linearly
    spaced values between min_max[0] and min_max[1].

    :param D: dimension of the grid
    :param n: number of points
    :param min_max: (min, max) pair
    """
    points = np.linspace(min_max[0], min_max[1], n)
    return np.ones((n, D)) * points[:, None]
def kmm_init(X, m=10):
    """Kmeans++-style seeding: pick m rows of X, weighting each new pick by
    squared distance from the previously chosen point. Useful to initialize
    inducing-point locations in sparse GPs.

    :param X: data, one point per row
    :param m: number of inducing points
    """
    # Pairwise squared distances via the Gram matrix.
    gram = np.dot(X, X.T)
    dist = (-2. * gram + np.diag(gram)[:, np.newaxis] + np.diag(gram)[np.newaxis, :])
    # First point is drawn uniformly at random.
    chosen = np.random.permutation(X.shape[0])[0]
    selected = [chosen]
    weights = dist[chosen] / dist[chosen].sum()
    for _ in range(m - 1):
        chosen = np.random.multinomial(1, weights.flatten()).argmax()
        selected.append(chosen)
        weights = dist[chosen] / dist[chosen].sum()
    return X[np.array(selected)]
### make a parameter to its corresponding array:
def param_to_array(*param):
    """Convert an arbitrary number of parameters to :class:ndarray class objects.

    This converts parameter objects to plain numpy arrays (e.g. for
    scipy.weave.inline, where scipy.weave.blitz performs no automatic array
    detection even when the object inherits from :class:ndarray).

    A single argument yields a single array; several arguments yield a list.
    """
    import warnings
    warnings.warn("Please use param.values, as this function will be deprecated in the next release.", DeprecationWarning)
    assert len(param) > 0, "At least one parameter needed"
    views = [p.view(np.ndarray) for p in param]
    return views[0] if len(param) == 1 else views
def blockify_hessian(func):
    """Decorator: when the wrapped object has ``not_block_really`` set and
    the returned array is not square, expand it into a full diagonal matrix
    via np.diagflat; otherwise pass the result through unchanged."""
    def wrapped(self, *args, **kwargs):
        hessian = func(self, *args, **kwargs)
        needs_expansion = self.not_block_really and (hessian.shape[0] != hessian.shape[1])
        return np.diagflat(hessian) if needs_expansion else hessian
    return wrapped
def blockify_third(func):
    """Decorator: when the wrapped object has ``not_block_really`` set and
    the result has fewer than three dimensions, lift the values onto the
    main diagonal of an (n, n, n) cube of zeros; otherwise pass through."""
    def wrapped(self, *args, **kwargs):
        third = func(self, *args, **kwargs)
        if not (self.not_block_really and len(third.shape) < 3):
            return third
        n = third.shape[0]
        cube = np.zeros((n, n, n))
        diag = range(n)
        cube[diag, diag, diag] = np.squeeze(third)
        return cube
    return wrapped
def blockify_dhess_dtheta(func):
    """Decorator: when the wrapped object has ``not_block_really`` set and
    the result has fewer than three dimensions, spread each parameter's
    column onto the diagonal of an (n, n, n_params) array of zeros;
    otherwise pass the result through unchanged."""
    def wrapped(self, *args, **kwargs):
        deriv = func(self, *args, **kwargs)
        if not (self.not_block_really and len(deriv.shape) < 3):
            return deriv
        n, n_params = deriv.shape[0], deriv.shape[-1]
        out = np.zeros((n, n, n_params))
        diag = range(n)
        for p in range(n_params):
            out[diag, diag, p] = np.squeeze(deriv[:, p])
        return out
    return wrapped
| beckdaniel/GPy | GPy/util/misc.py | Python | bsd-3-clause | 5,919 |
"""Gammas
This module implements simulation of interactions of gamma photons with
a scintillator. Used in groundparticle simulations.
"""
from __future__ import division
from random import expovariate
import numpy as np
SCINTILLATOR_THICKNESS = 2.0  # cm
MAX_DEPTH = 112.  # longest straight path in scintillator in cm
ENERGY_LOSS = 2.0  # 2 MeV per cm
MAX_E = ENERGY_LOSS * SCINTILLATOR_THICKNESS  # max deposit over full thickness [MeV]
MIP = 3.38  # MeV (energy deposit of one minimum-ionizing particle)
ELECTRON_REST_MASS_MeV = 0.5109989  # MeV
def compton_edge(gamma_energy):
    """Calculate the Compton edge for a given photon energy.

    W.R. Leo (1987) p.54

    :param gamma_energy: photon energy [MeV].
    :return: compton edge [MeV].
    """
    reduced = gamma_energy / ELECTRON_REST_MASS_MeV
    return gamma_energy * 2 * reduced / (1 + 2 * reduced)
def compton_energy_transfer(gamma_energy):
    """Draw a random photon-to-electron energy transfer.

    The differential cross section is integrated numerically into a
    cumulative distribution over [0, compton_edge]; a uniform random draw
    is then inverted through that distribution to give a recoil energy
    within kinematic bounds.

    :param gamma_energy: photon energy [MeV].
    :return: transfered energy [MeV].
    """
    edge = compton_edge(gamma_energy)
    recoil_grid = np.linspace(0, edge, 1000)
    # Electron recoil-energy density evaluated on the grid.
    density = [energy_transfer_cross_section(gamma_energy, recoil)
               for recoil in recoil_grid]
    cumulative = np.cumsum(density)
    normalised_cdf = cumulative / cumulative[-1]
    draw = np.random.random()
    fraction = normalised_cdf.searchsorted(draw) / 1000
    return compton_edge(gamma_energy) * fraction
def energy_transfer_cross_section(gamma_energy, recoil_energy):
    """Differential cross section dsigma/dT for the energy transfer from a
    photon to the scattered electron in Compton scattering.

    W.R. Leo (1987) p 54

    :param gamma_energy: photon energy [MeV].
    :param recoil_energy: electron recoil energy [MeV].
    """
    classical_radius = 2.82e-15  # classical electron radius [m]
    k = gamma_energy / ELECTRON_REST_MASS_MeV
    frac = recoil_energy / gamma_energy
    return (np.pi * (classical_radius ** 2) / (ELECTRON_REST_MASS_MeV * k ** 2) *
            (2 + (frac ** 2 / ((k ** 2) * ((1 - frac) ** 2))) +
             (frac / (1 - frac)) * (frac - 2 / k)))
def max_energy_deposit_in_mips(depth, scintillator_depth):
    """Maximum energy transfer (in MIPs) from an electron produced at
    *depth*, based on the remaining scintillator depth.

    Assumes scintillator depth is projected onto the direction of the
    incident particle (divided by cos(theta)).

    :param depth: depth at which the electron is produced [cm].
    :param scintillator_depth: total depth of the scintillator [cm].
    """
    remaining = scintillator_depth - depth
    return remaining * MAX_E / (scintillator_depth * MIP)
def simulate_detector_mips_gammas(p, theta):
    """Simulate detection of gammas

    Interaction depths are drawn from exponential distributions with the
    NIST mean free paths; Compton scattering and pair production compete
    per photon.

    :param p: the momenta of the gammas as array, in eV.
    :param theta: angles of incidence of the gammas as array, in radians.
    :return: the simulated detector signal (in mips).
    """
    # p [eV] and E [MeV]
    energies = p / 1e6
    mips = 0
    for energy, angle in zip(energies, theta):
        # project depth onto direction of incident particle
        scintillator_depth = min(SCINTILLATOR_THICKNESS / np.cos(angle),
                                 MAX_DEPTH)
        # Calculate interaction point in units of scinitlator depth.
        # If depth > 1 there is no interaction.
        depth_compton = expovariate(1 / compton_mean_free_path(energy))
        depth_pair = expovariate(1 / pair_mean_free_path(energy))
        # NOTE(review): '&' on plain bools acts like 'and' here.
        if ((depth_pair > scintillator_depth) &
                (depth_compton > scintillator_depth)):
            # no interaction
            continue
        # Interactions in scintillator: the process with the shallower
        # interaction point wins.
        elif depth_compton < depth_pair:
            # Compton scattering
            # kinetic energy transfered to electron by compton scattering
            energy_deposit = compton_energy_transfer(energy) / MIP
            max_deposit = max_energy_deposit_in_mips(depth_compton,
                                                     scintillator_depth)
            mips += min(max_deposit, energy_deposit)
        elif energy > 1.022:
            # Pair production: Two "electrons"
            # 1.022 MeV used for creation of two particles
            # all the rest is electron kinetic energy
            energy_deposit = (energy - 1.022) / MIP
            max_deposit = max_energy_deposit_in_mips(depth_pair,
                                                     scintillator_depth)
            mips += min(max_deposit, energy_deposit)
    return mips
def pair_mean_free_path(gamma_energy):
    """Mean free path for pair production.

    NIST XCOM database: https://www.nist.gov/pml/data/xcom/
    compound: C9H10, pair production (total attenuation).
    Table generated by @tomkooij/lio-project/photons/nist.py

    :param gamma_energy: photon energy [MeV].
    :return: mean free path [cm].
    """
    # Columns: (energy [MeV], mean free path [cm]); energies ascending.
    table = np.array([
        (4, 689.31), (5, 504.52), (6, 404.96),
        (7, 343.56), (8, 302.00), (9, 271.84),
        (10, 249.03), (11, 231.28), (12, 217.04),
        (13, 205.23), (14, 195.32), (15, 186.88),
        (16, 179.47), (18, 167.40), (20, 157.85),
        (22, 149.97), (24, 143.51), (26, 138.00),
        (28, 133.30), (30, 129.20), (40, 114.65),
        (50, 105.64), (60, 99.37), (80, 91.17),
        (100, 85.90), (150, 78.25), (200, 74.07),
        (300, 69.44), (400, 66.93), (500, 65.34),
        (600, 64.21), (800, 62.73), (1000, 61.82),
        (1500, 60.47), (2000, 59.72), (3000, 58.97),
        (4000, 58.53), (5000, 58.28), (6000, 58.09),
        (8000, 57.85), (10000, 57.70), (15000, 57.51),
        (20000, 57.41), (30000, 57.27), (40000, 57.21),
        (50000, 57.17), (60000, 57.13), (80000, 57.12),
        (100000, 57.08)])
    idx = table[:, 0].searchsorted(gamma_energy, side='left')
    return table[idx, 1]
def compton_mean_free_path(gamma_energy):
    """Mean free path for Compton scattering.

    NIST XCOM database: https://www.nist.gov/pml/data/xcom/
    compound: C9H10, compton scattering (incoherent scattering).
    Table generated by @tomkooij/lio-project/photons/nist.py

    :param gamma_energy: photon energy [MeV].
    :return: mean free path [cm].
    """
    # Columns: (energy [MeV], mean free path [cm]); energies ascending.
    table = np.array([
        (4, 31.88), (5, 36.90), (6, 41.75),
        (7, 46.47), (8, 51.05), (9, 55.52),
        (10, 59.95), (11, 64.27), (12, 68.54),
        (13, 72.73), (14, 76.86), (15, 80.97),
        (16, 85.03), (18, 93.02), (20, 100.92),
        (22, 108.60), (24, 116.23), (26, 123.81),
        (28, 131.23), (30, 138.64), (40, 174.40),
        (50, 208.94), (60, 242.54), (80, 307.50),
        (100, 370.51), (150, 520.29), (200, 663.57),
        (300, 936.33), (400, 1195.46), (500, 1444.04),
        (600, 1686.34), (800, 2159.36), (1000, 2624.67),
        (1500, 3757.99), (2000, 4856.73), (3000, 6983.24),
        (4000, 9049.77), (5000, 11063.17), (6000, 13048.02),
        (8000, 16940.54), (10000, 20746.89), (15000, 30021.01),
        (20000, 39047.25), (30000, 56625.14), (40000, 73746.31),
        (50000, 90579.71), (60000, 107146.68), (80000, 139684.31),
        (100000, 171791.79)])
    idx = table[:, 0].searchsorted(gamma_energy, side='left')
    return table[idx, 1]
| HiSPARC/sapphire | sapphire/simulations/gammas.py | Python | gpl-3.0 | 7,878 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the BarrierBeforeFinalMeasurements pass"""
import unittest
from qiskit.transpiler.passes import BarrierBeforeFinalMeasurements
from qiskit.converters import circuit_to_dag
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
from qiskit.test import QiskitTestCase
class TestBarrierBeforeFinalMeasurements(QiskitTestCase):
    """Tests the BarrierBeforeFinalMeasurements pass.

    Each test builds a circuit, runs the pass over its DAG, and compares the
    result against an explicitly constructed expected circuit.
    """
    def test_single_measure(self):
        """ A single measurement at the end
                        |
         q:--[m]--      q:--|-[m]---
             |      ->      |  |
         c:---.---      c:-----.---
        """
        qr = QuantumRegister(1, 'q')
        cr = ClassicalRegister(1, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.measure(qr, cr)
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))

    def test_ignore_single_measure(self):
        """Ignore single measurement because it is not at the end
         q:--[m]-[H]-      q:--[m]-[H]-
             |         ->      |
         c:---.------      c:---.------
        """
        qr = QuantumRegister(1, 'q')
        cr = ClassicalRegister(1, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.measure(qr, cr)
        circuit.h(qr[0])
        # Expected is identical to the input: no barrier is inserted.
        expected = QuantumCircuit(qr, cr)
        expected.measure(qr, cr)
        expected.h(qr[0])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))

    def test_single_measure_mix(self):
        """Two measurements, but only one is at the end
                                            |
         q0:--[m]--[H]--[m]--      q0:--[m]--[H]--|-[m]---
               |         |     ->        |        |  |
          c:---.---------.---       c:---.-----------.---
        """
        qr = QuantumRegister(1, 'q')
        cr = ClassicalRegister(1, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.measure(qr, cr)
        circuit.h(qr)
        circuit.measure(qr, cr)
        # Only the final measurement gets a barrier in front of it.
        expected = QuantumCircuit(qr, cr)
        expected.measure(qr, cr)
        expected.h(qr)
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))

    def test_two_qregs(self):
        """Two measurements in different qregs to different cregs
                                              |
         q0:--[H]--[m]------       q0:--[H]--|--[m]------
                    |                        |   |
         q1:--------|--[m]--   ->  q1:-------|---|--[m]--
                    |   |                    |   |   |
         c0:--------.---|---       c0:----------.---|---
                        |                            |
         c1:------------.---       c1:--------------.---
        """
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        cr0 = ClassicalRegister(1, 'c0')
        cr1 = ClassicalRegister(1, 'c1')
        circuit = QuantumCircuit(qr0, qr1, cr0, cr1)
        circuit.h(qr0)
        circuit.measure(qr0, cr0)
        circuit.measure(qr1, cr1)
        # A single barrier spanning both registers precedes the measurements.
        expected = QuantumCircuit(qr0, qr1, cr0, cr1)
        expected.h(qr0)
        expected.barrier(qr0, qr1)
        expected.measure(qr0, cr0)
        expected.measure(qr1, cr1)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))

    def test_two_qregs_to_a_single_creg(self):
        """Two measurements in different qregs to the same creg
                                              |
         q0:--[H]--[m]------       q0:--[H]--|--[m]------
                    |                        |   |
         q1:--------|--[m]--   ->  q1:-------|---|--[m]--
                    |   |                    |   |   |
         c0:--------.---|---       c0:-----------.---|---
             ------------.---           ---------------.---
        """
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        cr0 = ClassicalRegister(2, 'c0')
        circuit = QuantumCircuit(qr0, qr1, cr0)
        circuit.h(qr0)
        circuit.measure(qr0, cr0[0])
        circuit.measure(qr1, cr0[1])
        expected = QuantumCircuit(qr0, qr1, cr0)
        expected.h(qr0)
        expected.barrier(qr0, qr1)
        expected.measure(qr0, cr0[0])
        expected.measure(qr1, cr0[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))

    def test_preserve_measure_for_conditional(self):
        """Test barrier is inserted after any measurements used for conditionals
         q0:--[H]--[m]------------       q0:--[H]--[m]---------------
                    |                               |
         q1:--------|--[ z]--[m]--   ->  q1:--------|--[ z]--|--[m]--
                    |    |    |                     |    |   |   |
         c0:--------.--[=1]---|---       c0:--------.--[=1]------|---
                              |                                  |
         c1:------------------.---       c1:---------------------.---
        """
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        cr0 = ClassicalRegister(1, 'c0')
        cr1 = ClassicalRegister(1, 'c1')
        circuit = QuantumCircuit(qr0, qr1, cr0, cr1)
        circuit.h(qr0)
        circuit.measure(qr0, cr0)
        circuit.z(qr1).c_if(cr0, 1)
        circuit.measure(qr1, cr1)
        # The conditional consumes the first measurement, so only the last
        # measurement is "final" and gets a barrier (on qr1 only).
        expected = QuantumCircuit(qr0, qr1, cr0, cr1)
        expected.h(qr0)
        expected.measure(qr0, cr0)
        expected.z(qr1).c_if(cr0, 1)
        expected.barrier(qr1)
        expected.measure(qr1, cr1)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
class TestBarrierBeforeMeasuremetsWhenABarrierIsAlreadyThere(QiskitTestCase):
"""Tests the BarrierBeforeFinalMeasurements pass when there is a barrier already"""
    def test_handle_redundancy(self):
        """The pass is idempotent
            |                |
         q:--|-[m]--      q:--|-[m]---
             |  |     ->      |  |
         c:-----.---      c:-----.---
        """
        qr = QuantumRegister(1, 'q')
        cr = ClassicalRegister(1, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.barrier(qr)
        circuit.measure(qr, cr)
        # The pre-existing barrier is kept; no second barrier is added.
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr, cr)
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_remove_barrier_in_different_qregs(self):
        """Two measurements in different qregs to the same creg
         q0:--|--[m]------        q0:---|--[m]------
                   |                    |   |
         q1:--|---|--[m]--   ->   q1:---|---|--[m]--
                  |   |                 |   |   |
         c0:------.---|---        c0:-------.---|---
             ----------.---            -----------.---
        """
        qr0 = QuantumRegister(1, 'q0')
        qr1 = QuantumRegister(1, 'q1')
        cr0 = ClassicalRegister(2, 'c0')
        circuit = QuantumCircuit(qr0, qr1, cr0)
        # Two separate single-register barriers in the input ...
        circuit.barrier(qr0)
        circuit.barrier(qr1)
        circuit.measure(qr0, cr0[0])
        circuit.measure(qr1, cr0[1])
        # ... are merged into one barrier spanning both registers.
        expected = QuantumCircuit(qr0, qr1, cr0)
        expected.barrier(qr0, qr1)
        expected.measure(qr0, cr0[0])
        expected.measure(qr1, cr0[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_preserve_barriers_for_measurement_ordering(self):
        """If the circuit has a barrier to enforce a measurement order,
        preserve it in the output.
         q:---[m]--|-------        q:---|--[m]--|-------
           ----|---|--[m]--   ->     ---|---|---|--[m]--
               |       |                    |       |
         c:----.-------|---        c:-------.-------|---
           ------------.---          ---------------.---
        """
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.measure(qr[0], cr[0])
        circuit.barrier(qr)
        circuit.measure(qr[1], cr[1])
        # A new leading barrier is added; the ordering barrier survives.
        expected = QuantumCircuit(qr, cr)
        expected.barrier(qr)
        expected.measure(qr[0], cr[0])
        expected.barrier(qr)
        expected.measure(qr[1], cr[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
    def test_measures_followed_by_barriers_should_be_final(self):
        """If a measurement is followed only by a barrier,
        insert the barrier before it.
         q:---[H]--|--[m]--|-------        q:---[H]--|--[m]-|-------
           ---[H]--|---|---|--[m]--   ->     ---[H]--|---|--|--[m]--
                       |       |                         |      |
         c:------------.-------|---        c:------------.------|---
           --------------------.---          -------------------.---
        """
        qr = QuantumRegister(2, 'q')
        cr = ClassicalRegister(2, 'c')
        circuit = QuantumCircuit(qr, cr)
        circuit.h(qr)
        circuit.barrier(qr)
        circuit.measure(qr[0], cr[0])
        circuit.barrier(qr)
        circuit.measure(qr[1], cr[1])
        # Both measurements count as final; the existing barriers suffice.
        expected = QuantumCircuit(qr, cr)
        expected.h(qr)
        expected.barrier(qr)
        expected.measure(qr[0], cr[0])
        expected.barrier(qr)
        expected.measure(qr[1], cr[1])
        pass_ = BarrierBeforeFinalMeasurements()
        result = pass_.run(circuit_to_dag(circuit))
        self.assertEqual(result, circuit_to_dag(expected))
def test_should_merge_with_smaller_duplicate_barrier(self):
    """If an equivalent barrier exists covering a subset of the qubits
    covered by the new barrier, it should be replaced.
    """
    qreg = QuantumRegister(3, 'q')
    creg = ClassicalRegister(3, 'c')

    # Input: a partial barrier over q0/q1, then measurements on every qubit.
    circ = QuantumCircuit(qreg, creg)
    circ.barrier(qreg[0], qreg[1])
    circ.measure(qreg, creg)

    # Expected: the partial barrier is widened to span the full register.
    ref = QuantumCircuit(qreg, creg)
    ref.barrier(qreg)
    ref.measure(qreg, creg)

    out_dag = BarrierBeforeFinalMeasurements().run(circuit_to_dag(circ))
    self.assertEqual(out_dag, circuit_to_dag(ref))
def test_should_merge_with_larger_duplicate_barrier(self):
    """If a barrier exists and is stronger than the barrier to be inserted,
    preserve the existing barrier and do not insert a new barrier.
    """
    qr = QuantumRegister(3, 'q')
    cr = ClassicalRegister(3, 'c')

    circuit = QuantumCircuit(qr, cr)
    circuit.barrier(qr)
    circuit.measure(qr[0], cr[0])
    circuit.barrier(qr)
    circuit.measure(qr[1], cr[1])

    # The existing full-width barriers already dominate anything the pass
    # would add, so the circuit must pass through unchanged.
    expected = circuit

    pass_ = BarrierBeforeFinalMeasurements()
    result = pass_.run(circuit_to_dag(circuit))

    self.assertEqual(result, circuit_to_dag(expected))
def test_barrier_doesnt_reorder_gates(self):
    """ A barrier should not allow the reordering of gates, as pointed out in #2102
    """
    qr = QuantumRegister(4)
    cr = ClassicalRegister(4)
    circuit = QuantumCircuit(qr, cr)

    circuit.u1(0, qr[0])
    circuit.u1(1, qr[1])
    circuit.u1(2, qr[2])
    # Partial barrier: the u1(3) on q3 must stay after it.
    circuit.barrier(qr[2], qr[3])
    circuit.u1(3, qr[3])

    # The measured circuit under test is a copy with final measurements added.
    test_circuit = circuit.copy()
    test_circuit.measure(qr, cr)

    # expected circuit is the same, just with a barrier before the measurements
    expected = circuit.copy()
    expected.barrier(qr)
    expected.measure(qr, cr)

    pass_ = BarrierBeforeFinalMeasurements()
    result = pass_.run(circuit_to_dag(test_circuit))

    self.assertEqual(result, circuit_to_dag(expected))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| QISKit/qiskit-sdk-py | test/python/transpiler/test_barrier_before_final_measurements.py | Python | apache-2.0 | 14,581 |
# coding=utf-8
import cherrypy
from cherrypy import Tool
class SPARedirectTool(Tool):
    """Single Page Application redirect tool.

    Makes SPA client-side routing work by internally redirecting 404s to
    index.html. This replaces the custom-error-page approach, which also
    works but produces 404 error responses instead.
    """

    def __init__(self, index_url):
        # Where to send requests that would otherwise 404.
        self._index_url = index_url
        Tool.__init__(self, 'before_handler', self._redirect)

    def _redirect(self):
        # Issue an internal (server-side) redirect when the resolved
        # handler reports a 404 status.
        handler = cherrypy.request.handler
        if handler and handler.status == 404:
            raise cherrypy.InternalRedirect(self._index_url)
| h3llrais3r/Auto-Subliminal | autosubliminal/server/tool.py | Python | gpl-3.0 | 650 |
"""Tests for display of certificates on the student dashboard. """
import unittest
import datetime
import ddt
import mock
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from mock import patch
from pytz import UTC
from certificates.api import get_certificate_url # pylint: disable=import-error
from certificates.models import CertificateStatuses # pylint: disable=import-error
from certificates.tests.factories import GeneratedCertificateFactory # pylint: disable=import-error
from course_modes.models import CourseMode
from student.models import LinkedInAddToProfileConfiguration
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
# pylint: disable=no-member
# Reference dates used to exercise certificate availability windows.
PAST_DATE = datetime.datetime.now(UTC) - datetime.timedelta(days=2)
FUTURE_DATE = datetime.datetime.now(UTC) + datetime.timedelta(days=2)
class CertificateDisplayTestBase(SharedModuleStoreTestCase):
    """Tests display of certificates on the student dashboard. """

    USERNAME = "test_user"
    PASSWORD = "password"
    DOWNLOAD_URL = "http://www.example.com/certificate.pdf"

    @classmethod
    def setUpClass(cls):
        """Create the shared test course with certificates shown early."""
        super(CertificateDisplayTestBase, cls).setUpClass()
        cls.course = CourseFactory()
        cls.course.certificates_display_behavior = "early_with_info"

        with cls.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, cls.course.id):
            cls.store.update_item(cls.course, cls.USERNAME)

    def setUp(self):
        """Create a test user and log them in."""
        super(CertificateDisplayTestBase, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result, msg="Could not log in")

    def _check_linkedin_visibility(self, is_visible):
        """
        Performs assertions on the Dashboard
        """
        response = self.client.get(reverse('dashboard'))
        if is_visible:
            self.assertContains(response, u'Add Certificate to LinkedIn Profile')
        else:
            self.assertNotContains(response, u'Add Certificate to LinkedIn Profile')

    def _create_certificate(self, enrollment_mode):
        """Simulate that the user has a generated certificate. """
        CourseEnrollmentFactory.create(
            user=self.user,
            course_id=self.course.id,
            mode=enrollment_mode)
        return GeneratedCertificateFactory(
            user=self.user,
            course_id=self.course.id,
            mode=enrollment_mode,
            download_url=self.DOWNLOAD_URL,
            status=CertificateStatuses.downloadable,
            grade=0.98,
        )

    def _check_can_download_certificate(self):
        """
        Inspect the dashboard to see if a certificate can be downloaded.
        """
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'Download Your ID Verified')
        self.assertContains(response, self.DOWNLOAD_URL)

    def _check_can_download_certificate_no_id(self):
        """
        Inspects the dashboard to see if a certificate for a non verified course enrollment
        is present
        """
        response = self.client.get(reverse('dashboard'))
        self.assertContains(response, u'Download')
        self.assertContains(response, u'(PDF)')
        self.assertContains(response, self.DOWNLOAD_URL)

    def _check_can_not_download_certificate(self):
        """
        Make sure response does not have any of the download certificate buttons
        """
        response = self.client.get(reverse('dashboard'))
        self.assertNotContains(response, u'View Test_Certificate')
        self.assertNotContains(response, u'Download Your Test_Certificate (PDF)')
        self.assertNotContains(response, u'Download Test_Certificate (PDF)')
        self.assertNotContains(response, self.DOWNLOAD_URL)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDashboardMessageDisplayTest(CertificateDisplayTestBase):
    """
    Tests the certificates messages for a course in the dashboard.
    """

    ENABLED_SIGNALS = ['course_published']

    @classmethod
    def setUpClass(cls):
        """Switch the shared course to end-of-course certificate display."""
        super(CertificateDashboardMessageDisplayTest, cls).setUpClass()
        cls.course.certificates_display_behavior = "end"
        cls.course.save()
        cls.store.update_item(cls.course, cls.USERNAME)

    def _check_message(self, certificate_available_date):
        """Assert the dashboard message matches `certificate_available_date`."""
        response = self.client.get(reverse('dashboard'))

        if certificate_available_date is None:
            # No availability date configured: neither message nor link.
            self.assertNotContains(response, u"Your certificate will be available on")
            self.assertNotContains(response, u"View Test_Certificate")
        elif datetime.datetime.now(UTC) < certificate_available_date:
            # Date is in the future: show the "available on" message only.
            self.assertContains(response, u"Your certificate will be available on")
            self.assertNotContains(response, u"View Test_Certificate")
        else:
            # Date has passed: the certificate should be downloadable.
            self._check_can_download_certificate()

    @ddt.data(True, False, None)
    def test_certificate_available_date(self, past_certificate_available_date):
        cert = self._create_certificate('verified')
        cert.status = CertificateStatuses.downloadable
        cert.save()

        # Map the ddt flag onto an availability date:
        # None -> unset, True -> past, False -> future.
        # (The original `elif not past_certificate_available_date:` branch was
        # always true at that point, so it is now a plain `else`.)
        if past_certificate_available_date is None:
            certificate_available_date = None
        elif past_certificate_available_date:
            certificate_available_date = PAST_DATE
        else:
            certificate_available_date = FUTURE_DATE

        self.course.certificate_available_date = certificate_available_date
        self.course.save()
        self.store.update_item(self.course, self.USERNAME)

        self._check_message(certificate_available_date)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTest(CertificateDisplayTestBase):
    """
    Tests of certificate display.
    """

    @ddt.data('verified', 'professional')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_display_verified_certificate(self, enrollment_mode):
        """A downloadable certificate appears for verified/professional modes."""
        self._create_certificate(enrollment_mode)
        self._check_can_download_certificate()

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_no_certificate_status_no_problem(self):
        """An empty cert_info dict must not break the dashboard."""
        with patch('student.views.cert_info', return_value={}):
            self._create_certificate('honor')
            self._check_can_not_download_certificate()

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    def test_display_verified_certificate_no_id(self):
        """
        Confirm that if we get a certificate with a no-id-professional mode
        we still can download our certificate
        """
        self._create_certificate(CourseMode.NO_ID_PROFESSIONAL_MODE)
        self._check_can_download_certificate_no_id()

    @ddt.data('verified', 'honor', 'professional')
    def test_unverified_certificate_message(self, enrollment_mode):
        """An unverified certificate shows an identity-verification warning."""
        cert = self._create_certificate(enrollment_mode)
        cert.status = CertificateStatuses.unverified
        cert.save()
        response = self.client.get(reverse('dashboard'))
        self.assertContains(
            response,
            u'do not have a current verified identity with {platform_name}'
            .format(platform_name=settings.PLATFORM_NAME))

    def test_post_to_linkedin_invisibility(self):
        """
        Verifies that the post certificate to linked button
        does not appear by default (when config is not set)
        """
        self._create_certificate('honor')

        # until we set up the configuration, the LinkedIn action
        # button should not be visible
        self._check_linkedin_visibility(False)

    def test_post_to_linkedin_visibility(self):
        """
        Verifies that the post certificate to linked button appears
        as expected
        """
        self._create_certificate('honor')

        config = LinkedInAddToProfileConfiguration(
            company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
            enabled=True
        )
        config.save()

        # now we should see it
        self._check_linkedin_visibility(True)

    @mock.patch("openedx.core.djangoapps.theming.helpers.is_request_in_themed_site", mock.Mock(return_value=True))
    def test_post_to_linkedin_site_specific(self):
        """
        Verifies behavior for themed sites which disables the post to LinkedIn
        feature (for now)
        """
        self._create_certificate('honor')

        config = LinkedInAddToProfileConfiguration(
            company_identifier='0_mC_o2MizqdtZEmkVXjH4eYwMj4DnkCWrZP_D9',
            enabled=True
        )
        config.save()

        # now we should not see it because we are in a themed site
        self._check_linkedin_visibility(False)
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTestHtmlView(CertificateDisplayTestBase):
    """
    Tests of webview certificate display
    """

    @classmethod
    def setUpClass(cls):
        """Enable web (HTML) certificates on the shared course."""
        super(CertificateDisplayTestHtmlView, cls).setUpClass()
        cls.course.cert_html_view_enabled = True
        cls.course.save()
        cls.store.update_item(cls.course, cls.USERNAME)

    @ddt.data('verified', 'honor')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_display_download_certificate_button(self, enrollment_mode):
        """
        Tests if CERTIFICATES_HTML_VIEW is True
        and course has enabled web certificates via cert_html_view_enabled setting
        and no active certificate configuration available
        then any of the Download certificate button should not be visible.
        """
        self._create_certificate(enrollment_mode)
        self._check_can_not_download_certificate()
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CertificateDisplayTestLinkedHtmlView(CertificateDisplayTestBase):
    """
    Tests of linked student certificates.
    """

    @classmethod
    def setUpClass(cls):
        """Enable web certificates and install an active cert configuration."""
        super(CertificateDisplayTestLinkedHtmlView, cls).setUpClass()
        cls.course.cert_html_view_enabled = True

        # Minimal active certificate configuration for the course.
        certificates = [
            {
                'id': 0,
                'name': 'Test Name',
                'description': 'Test Description',
                'is_active': True,
                'signatories': [],
                'version': 1
            }
        ]
        cls.course.certificates = {'certificates': certificates}
        cls.course.save()
        cls.store.update_item(cls.course, cls.USERNAME)

    @ddt.data('verified')
    @override_settings(CERT_NAME_SHORT='Test_Certificate')
    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    def test_linked_student_to_web_view_credential(self, enrollment_mode):
        """The dashboard links to the web-view certificate URL."""
        cert = self._create_certificate(enrollment_mode)
        test_url = get_certificate_url(course_id=self.course.id, uuid=cert.verify_uuid)

        response = self.client.get(reverse('dashboard'))

        self.assertContains(response, u'View Test_Certificate')
        self.assertContains(response, test_url)
| lduarte1991/edx-platform | common/djangoapps/student/tests/test_certificates.py | Python | agpl-3.0 | 11,733 |
import sqlite3 as lite
import db
class GlobalSettings(object):
    """Key/value store for plugin-wide settings, persisted in SQLite.

    One row per setting; ``key`` is UNIQUE so ``INSERT OR REPLACE`` acts
    as an upsert.
    """

    lite_schema = "CREATE TABLE IF NOT EXISTS GlobalSettings (key STRING UNIQUE, value STRING)"
    indexes = []

    def __init__(self, key, value):
        # BUG FIX: the constructor previously took ``gh_username`` but
        # assigned the undefined names ``key``/``value``, raising NameError
        # on every instantiation. It now takes the values it stores.
        self.key = key
        self.value = value

    @classmethod
    def set(cls, key, value):
        """Upsert ``key`` -> ``value`` and mirror it onto ``db.GLOBALSETTINGS``."""
        con = db._get_connection()
        c = con.cursor()
        c.execute('INSERT OR REPLACE INTO GlobalSettings (key, value) values (?, ?)', (key, value))
        con.commit()
        # Keep the in-memory settings object in sync with the database.
        setattr(db.GLOBALSETTINGS, key, value)


# Registry of model classes whose ``lite_schema`` should be created at startup.
sqlite_table_models = [GlobalSettings]
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from builtins import str
import sys
import unittest
import re
import os
# Make the project root importable when this test file is run directly.
dirname = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(dirname, '..', '..', '..'))
import uuid
from SpiffWorkflow.serializer.dict import DictionarySerializer
from .baseTest import SerializerTest
from SpiffWorkflow import Workflow
class DictionarySerializerTest(SerializerTest):
    """Runs the shared serializer test suite against DictionarySerializer."""

    def setUp(self):
        super(DictionarySerializerTest, self).setUp()
        self.serializer = DictionarySerializer()
        self.return_type = dict

    def _compare_results(self, item1, item2,
                         exclude_dynamic=False,
                         exclude_items=None):
        """Recursively assert that two serialized structures are equivalent.

        `exclude_items` may contain dict keys or types to skip;
        `exclude_dynamic` additionally skips fields that change between
        runs (timestamps, task pointers, UUIDs).
        """
        exclude_items = exclude_items if exclude_items is not None else []
        if exclude_dynamic:
            # NOTE: this mutates the passed-in list, so exclusions accumulate
            # across the recursion (the membership checks keep it duplicate-free).
            if 'last_state_change' not in exclude_items:
                exclude_items.append('last_state_change')
            if 'last_task' not in exclude_items:
                exclude_items.append('last_task')
            if uuid.UUID not in exclude_items:
                exclude_items.append(uuid.UUID)

        # Skip values whose type is excluded (e.g. uuid.UUID).
        if type(item1) in exclude_items:
            return

        if isinstance(item1, dict):
            self.assertIsInstance(item2, dict)
            for key, value in list(item1.items()):
                self.assertIn(key, item2)
                if key in exclude_items:
                    continue
                self._compare_results(value, item2[key],
                                      exclude_dynamic=exclude_dynamic,
                                      exclude_items=exclude_items)
            # Ensure item2 has no extra keys.
            for key in item2:
                self.assertIn(key, item1)
        elif isinstance(item1, list):
            msg = "item is not a list (is a " + str(type(item2)) + ")"
            self.assertIsInstance(item2, list, msg)
            msg = "list lengths differ: {} vs {}".format(
                len(item1), len(item2))
            self.assertEqual(len(item1), len(item2), msg)
            for i, listitem in enumerate(item1):
                self._compare_results(listitem, item2[i],
                                      exclude_dynamic=exclude_dynamic,
                                      exclude_items=exclude_items)
        elif isinstance(item1, Workflow):
            # Workflows must have been serialized away by this point.
            raise Exception("Item is a Workflow")
        else:
            msg = "{}: types differ: {} vs {}".format(
                str(item2), type(item1), type(item2))
            self.assertEqual(type(item1), type(item2), msg)
            self.assertEqual(item1, item2)
def suite():
    """Build the unittest suite for this module."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(DictionarySerializerTest)
# Run this module's suite directly with verbose output.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| knipknap/SpiffWorkflow | tests/SpiffWorkflow/serializer/dictTest.py | Python | lgpl-3.0 | 2,803 |
from django.conf.urls.defaults import patterns, url
# URL routes for the show app (legacy Django ``patterns()`` syntax with
# string view names resolved against the 'show.views' prefix).
# NOTE: order matters — 'showcontributor/<slug>' must come after the more
# specific 'showcontributor/list|appearance' routes.
urlpatterns = patterns(
    'show.views',
    url(r'^radioshow/entrylist/$', 'radioshow_entryitem_list', name='radioshow_entryitem_list'),
    url(r'^showcontributor/list/(?P<slug>[\w-]+)/$', 'showcontributor_content_list', name='showcontributor_content_list'),
    url(r'^showcontributor/appearance/(?P<slug>[\w-]+)/$', 'showcontributor_appearance_list', name='showcontributor_appearance_list'),
    url(r'^showcontributor/(?P<slug>[\w-]+)/$', 'showcontributor_detail', name='showcontributor_detail'),
    url(r'^showcontributor/content/(?P<slug>[\w-]+)/$', 'showcontributor_content_detail', name='showcontributor_content_detail'),
    url(r'^showcontributor/contact/(?P<slug>[\w-]+)/$', 'showcontributor_contact', name='showcontributor_contact'),
)
)
| praekelt/panya-show | show/urls.py | Python | bsd-3-clause | 804 |
# -*- coding: utf-8 -*-
"""
tests.conftest
~~~~~~~~~~~~~~
configuriation for pytests
"""
import os
import pytest
# from alembic.command import upgrade
# from alembic.config import Config
from uru_crm.factory import create_app
from uru_crm.config import TestConfig
from uru_crm.extensions import db as _db
from uru_crm.utils import INSTANCE_FOLDER_PATH
@pytest.fixture(scope='session')
def app(request):
    """Session-wide test `Flask` application."""
    flask_app = create_app()
    flask_app.config.from_object(TestConfig)

    # Establish an application context before running the tests and pop it
    # when the session ends.
    ctx = flask_app.app_context()
    ctx.push()
    request.addfinalizer(ctx.pop)

    return flask_app
def apply_migrations(app):
    """Applies all alembic migrations."""
    # Intentionally a no-op for now; the test database schema is created
    # via db.create_all() in the session fixture instead.
    # ALEMBIC_CONFIG = os.path.join(app.config['PROJECT_ROOT'], 'migrations/alembic.ini')
    # config = Config(ALEMBIC_CONFIG)
    # upgrade(config, 'head')
    pass
@pytest.fixture(scope='session')
def db(app, request):
    """Session-wide test database."""
    # Start from a clean slate if a previous run left a database file behind.
    if os.path.exists(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite')):
        os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))

    def teardown():
        # Drop all tables and remove the database file after the session.
        _db.drop_all()
        os.unlink(os.path.join(INSTANCE_FOLDER_PATH, 'test.sqlite'))

    # Bind the extension to the test app before applying migrations.
    _db.app = app
    apply_migrations(app)

    request.addfinalizer(teardown)
    return _db
@pytest.fixture(scope='function')
def session(db, request):
    """Creates a new database session for a test."""
    # Run each test inside its own transaction, rolled back afterwards so
    # tests cannot see each other's writes.
    connection = db.engine.connect()
    transaction = connection.begin()

    session = db.create_scoped_session()
    db.create_all()
    db.session = session

    def teardown():
        transaction.rollback()
        connection.close()
        session.remove()

    request.addfinalizer(teardown)
    return session
| gitbenji/uru-crm | tests/conftest.py | Python | mit | 1,851 |
# pylint: disable=C0111
# pylint: disable=W0621
#EVERY PROBLEM TYPE MUST HAVE THE FOLLOWING:
# -Section in Dictionary containing:
# -factory
# -kwargs
# -(optional metadata)
# -Correct, Incorrect and Unanswered CSS selectors
# -A way to answer the problem correctly and incorrectly
# -A way to check the problem was answered correctly, incorrectly and blank
from lettuce import world
import random
import textwrap
from common import section_location
from capa.tests.response_xml_factory import (
ChoiceResponseXMLFactory,
ChoiceTextResponseXMLFactory,
CodeResponseXMLFactory,
CustomResponseXMLFactory,
FormulaResponseXMLFactory,
ImageResponseXMLFactory,
MultipleChoiceResponseXMLFactory,
NumericalResponseXMLFactory,
OptionResponseXMLFactory,
StringResponseXMLFactory,
)
# Factories from capa.tests.response_xml_factory that we will use
# to generate the problem XML, with the keyword args used to configure
# the output.
# 'correct', 'incorrect', and 'unanswered' keys are lists of CSS selectors
# the presence of any in the list is sufficient
PROBLEM_DICT = {
'drop down': {
'factory': OptionResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Option 2',
'options': ['Option 1', 'Option 2', 'Option 3', 'Option 4'],
'correct_option': 'Option 2'},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'multiple choice': {
'factory': MultipleChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 3',
'choices': [False, False, True, False],
'choice_names': ['choice_0', 'choice_1', 'choice_2', 'choice_3']},
'correct': ['label.choicegroup_correct', 'span.correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'checkbox': {
'factory': ChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choices 1 and 3',
'choice_type': 'checkbox',
'choices': [True, False, True, False, False],
'choice_names': ['Choice 1', 'Choice 2', 'Choice 3', 'Choice 4']},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio': {
'factory': ChoiceResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 3',
'choice_type': 'radio',
'choices': [False, False, True, False],
'choice_names': ['Choice 1', 'Choice 2', 'Choice 3', 'Choice 4']},
'correct': ['label.choicegroup_correct', 'span.correct'],
'incorrect': ['label.choicegroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'string': {
'factory': StringResponseXMLFactory(),
'kwargs': {
'question_text': 'The answer is "correct string"',
'case_sensitive': False,
'answer': 'correct string'},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered']},
'numerical': {
'factory': NumericalResponseXMLFactory(),
'kwargs': {
'question_text': 'The answer is pi + 1',
'answer': '4.14159',
'tolerance': '0.00001',
'math_display': True},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered']},
'formula': {
'factory': FormulaResponseXMLFactory(),
'kwargs': {
'question_text': 'The solution is [mathjax]x^2+2x+y[/mathjax]',
'sample_dict': {'x': (-100, 100), 'y': (-100, 100)},
'num_samples': 10,
'tolerance': 0.00001,
'math_display': True,
'answer': 'x^2+2*x+y'},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered']},
'script': {
'factory': CustomResponseXMLFactory(),
'kwargs': {
'question_text': 'Enter two integers that sum to 10.',
'cfn': 'test_add_to_ten',
'expect': '10',
'num_inputs': 2,
'script': textwrap.dedent("""
def test_add_to_ten(expect,ans):
try:
a1=int(ans[0])
a2=int(ans[1])
except ValueError:
a1=0
a2=0
return (a1+a2)==int(expect)
""")},
'correct': ['div.correct'],
'incorrect': ['div.incorrect'],
'unanswered': ['div.unanswered']},
'code': {
'factory': CodeResponseXMLFactory(),
'kwargs': {
'question_text': 'Submit code to an external grader',
'initial_display': 'print "Hello world!"',
'grader_payload': '{"grader": "ps1/Spring2013/test_grader.py"}', },
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'radio_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'radiotextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['section.choicetextgroup_correct'],
'incorrect': ['section.choicetextgroup_incorrect', 'span.incorrect'],
'unanswered': ['span.unanswered']},
'checkbox_text': {
'factory': ChoiceTextResponseXMLFactory(),
'kwargs': {
'question_text': 'The correct answer is Choice 0 and input 8',
'type': 'checkboxtextgroup',
'choices': [("true", {"answer": "8", "tolerance": "1"}),
("false", {"answer": "8", "tolerance": "1"})
]
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']},
'image': {
'factory': ImageResponseXMLFactory(),
'kwargs': {
'src': '/static/images/mit_dome.jpg',
'rectangle': '(50,50)-(100,100)'
},
'correct': ['span.correct'],
'incorrect': ['span.incorrect'],
'unanswered': ['span.unanswered']}
}
def answer_problem(course, problem_type, correctness):
    """Answer the rendered problem in the browser.

    `correctness` is 'correct' or 'incorrect' and selects which canned
    answer is entered for the given `problem_type`.
    """
    # Make sure that the problem has been completely rendered before
    # starting to input an answer.
    world.wait_for_ajax_complete()

    section_loc = section_location(course)

    if problem_type == "drop down":
        select_name = "input_i4x-{0.org}-{0.course}-problem-drop_down_2_1".format(section_loc)
        option_text = 'Option 2' if correctness == 'correct' else 'Option 3'
        world.select_option(select_name, option_text)

    elif problem_type == "multiple choice":
        if correctness == 'correct':
            world.css_check(inputfield(course, 'multiple choice', choice='choice_2'))
        else:
            world.css_check(inputfield(course, 'multiple choice', choice='choice_1'))

    elif problem_type == "checkbox":
        if correctness == 'correct':
            world.css_check(inputfield(course, 'checkbox', choice='choice_0'))
            world.css_check(inputfield(course, 'checkbox', choice='choice_2'))
        else:
            world.css_check(inputfield(course, 'checkbox', choice='choice_3'))

    elif problem_type == 'radio':
        if correctness == 'correct':
            world.css_check(inputfield(course, 'radio', choice='choice_2'))
        else:
            world.css_check(inputfield(course, 'radio', choice='choice_1'))

    elif problem_type == 'string':
        textvalue = 'correct string' if correctness == 'correct' else 'incorrect'
        world.css_fill(inputfield(course, 'string'), textvalue)

    elif problem_type == 'numerical':
        textvalue = "pi + 1" if correctness == 'correct' else str(random.randint(-2, 2))
        world.css_fill(inputfield(course, 'numerical'), textvalue)

    elif problem_type == 'formula':
        textvalue = "x^2+2*x+y" if correctness == 'correct' else 'x^2'
        world.css_fill(inputfield(course, 'formula'), textvalue)

    elif problem_type == 'script':
        # Correct answer is any two integers that sum to 10
        first_addend = random.randint(-100, 100)
        second_addend = 10 - first_addend

        # If we want an incorrect answer, then change
        # the second addend so they no longer sum to 10
        if correctness == 'incorrect':
            second_addend += random.randint(1, 10)

        world.css_fill(inputfield(course, 'script', input_num=1), str(first_addend))
        world.css_fill(inputfield(course, 'script', input_num=2), str(second_addend))

    elif problem_type == 'code':
        # The fake xqueue server is configured to respond
        # correct / incorrect no matter what we submit.
        # Furthermore, since the inline code response uses
        # JavaScript to make the code display nicely, it's difficult
        # to programatically input text
        # (there's not <textarea> we can just fill text into)
        # For this reason, we submit the initial code in the response
        # (configured in the problem XML above)
        pass

    elif problem_type == 'radio_text' or problem_type == 'checkbox_text':
        input_value = "8" if correctness == 'correct' else "5"
        choice = "choiceinput_0bc" if correctness == 'correct' else "choiceinput_1bc"
        world.css_fill(
            inputfield(
                course,
                problem_type,
                choice="choiceinput_0_numtolerance_input_0"
            ),
            input_value
        )
        world.css_check(inputfield(course, problem_type, choice=choice))

    elif problem_type == 'image':
        # Click inside (correct) or outside (incorrect) the answer rectangle.
        offset = 25 if correctness == "correct" else -25

        def try_click():
            image_selector = "#imageinput_i4x-{0.org}-{0.course}-problem-image_2_1".format(section_loc)
            input_selector = "#input_i4x-{0.org}-{0.course}-problem-image_2_1".format(section_loc)

            world.browser.execute_script('$("body").on("click", function(event) {console.log(event);})')

            initial_input = world.css_value(input_selector)
            world.wait_for_visible(image_selector)

            image = world.css_find(image_selector).first
            (image.action_chains
                .move_to_element(image._element)
                .move_by_offset(offset, offset)
                .click()
                .perform())

            # Wait until the click has registered by updating the hidden input.
            world.wait_for(lambda _: world.css_value(input_selector) != initial_input)

        world.retry_on_exception(try_click)
def problem_has_answer(course, problem_type, answer_class):
    """Assert that the rendered problem shows the expected saved answer.

    `answer_class` is one of 'correct', 'incorrect', or 'blank'.
    """
    if problem_type == "drop down":
        if answer_class == 'blank':
            assert world.is_css_not_present('option[selected="true"]')
        else:
            actual = world.css_value('option[selected="true"]')
            expected = 'Option 2' if answer_class == 'correct' else 'Option 3'
            assert actual == expected

    elif problem_type == "multiple choice":
        if answer_class == 'correct':
            assert_checked(course, 'multiple choice', ['choice_2'])
        elif answer_class == 'incorrect':
            assert_checked(course, 'multiple choice', ['choice_1'])
        else:
            assert_checked(course, 'multiple choice', [])

    elif problem_type == "checkbox":
        if answer_class == 'correct':
            assert_checked(course, 'checkbox', ['choice_0', 'choice_2'])
        elif answer_class == 'incorrect':
            assert_checked(course, 'checkbox', ['choice_3'])
        else:
            assert_checked(course, 'checkbox', [])

    elif problem_type == "radio":
        if answer_class == 'correct':
            assert_checked(course, 'radio', ['choice_2'])
        elif answer_class == 'incorrect':
            assert_checked(course, 'radio', ['choice_1'])
        else:
            assert_checked(course, 'radio', [])

    elif problem_type == 'string':
        if answer_class == 'blank':
            expected = ''
        else:
            expected = 'correct string' if answer_class == 'correct' else 'incorrect'
        assert_textfield(course, 'string', expected)

    elif problem_type == 'formula':
        if answer_class == 'blank':
            expected = ''
        else:
            expected = "x^2+2*x+y" if answer_class == 'correct' else 'x^2'
        assert_textfield(course, 'formula', expected)

    elif problem_type in ("radio_text", "checkbox_text"):
        if answer_class == 'blank':
            expected = ('', '')
            assert_choicetext_values(course, problem_type, (), expected)
        elif answer_class == 'incorrect':
            expected = ('5', '')
            assert_choicetext_values(course, problem_type, ["choiceinput_1bc"], expected)
        else:
            expected = ('8', '')
            assert_choicetext_values(course, problem_type, ["choiceinput_0bc"], expected)

    else:
        # The other response types use random data,
        # which would be difficult to check
        # We trade input value coverage in the other tests for
        # input type coverage in this test.
        pass
def add_problem_to_course(course, problem_type, extra_meta=None):
    """
    Add a problem of `problem_type` to the test course and return the new item.

    `extra_meta` is an optional dict merged over the default metadata.
    """
    # `problem_type` must be one of the configured factories.
    assert problem_type in PROBLEM_DICT

    # Generate the problem XML using capa.tests.response_xml_factory
    factory_dict = PROBLEM_DICT[problem_type]
    problem_xml = factory_dict['factory'].build_xml(**factory_dict['kwargs'])

    # We set rerandomize=always in the default metadata so that the "Reset"
    # button will appear.  (Was `if not 'metadata' in factory_dict`; dict.get
    # expresses the same fallback idiomatically.)
    metadata = factory_dict.get('metadata', {'rerandomize': 'always'})
    if extra_meta:
        metadata = dict(metadata, **extra_meta)

    # Create a problem item using our generated XML
    category_name = "problem"
    return world.ItemFactory.create(
        parent_location=section_location(course),
        category=category_name,
        display_name=str(problem_type),
        data=problem_xml,
        metadata=metadata
    )
def inputfield(course, problem_type, choice=None, input_num=1):
    """ Return the css selector for `problem_type`.

    For example, if problem_type is 'string', return
    the text field for the string problem in the test course.

    `choice` is the name of the checkbox input in a group
    of checkboxes. """
    loc = section_location(course)

    # choicetext problems use a different element-id naming scheme
    if problem_type in ("radio_text", "checkbox_text"):
        template = "input#i4x-{org}-{course}-problem-{ptype}_2_{input}"
    else:
        template = "input#input_i4x-{org}-{course}-problem-{ptype}_2_{input}"

    selector = template.format(
        org=loc.org,
        course=loc.course,
        ptype=problem_type.replace(" ", "_"),
        input=input_num,
    )

    if choice is not None:
        separator = "_choice_" if problem_type == "multiple choice" else "_"
        selector += separator + str(choice)

    # If the input element doesn't exist, fail immediately
    assert world.is_css_present(selector)

    # Retrieve the input element
    return selector
def assert_checked(course, problem_type, choices):
    """Assert that exactly the choice names in `choices` are checked.

    Works for both radio and checkbox problems.
    """
    all_choices = ['choice_0', 'choice_1', 'choice_2', 'choice_3']
    for current in all_choices:
        def verify():
            selector = inputfield(course, problem_type, choice=current)
            element = world.css_find(selector)
            if current in choices:
                assert element.checked
            else:
                assert not element.checked
        world.retry_on_exception(verify)
def assert_textfield(course, problem_type, expected_text, input_num=1):
    """Assert the text input for `problem_type` holds `expected_text`."""
    selector = inputfield(course, problem_type, input_num=input_num)
    assert world.css_value(selector) == expected_text
def assert_choicetext_values(course, problem_type, choices, expected_values):
    """
    Asserts that only the given choices are checked, and given
    text fields have a desired value
    """
    # Names of the radio buttons or checkboxes
    checkbox_names = ['choiceinput_0bc', 'choiceinput_1bc']
    # Names of the numtolerance_inputs
    input_names = [
        "choiceinput_0_numtolerance_input_0",
        "choiceinput_1_numtolerance_input_0",
    ]
    for name in checkbox_names:
        element = world.css_find(inputfield(course, problem_type, choice=name))
        if name in choices:
            assert element.checked
        else:
            assert not element.checked
    for name, expected in zip(input_names, expected_values):
        element = world.css_find(inputfield(course, problem_type, name))
        # Remove any trailing spaces that may have been added
        assert element.value.strip() == expected
| jswope00/GAI | lms/djangoapps/courseware/features/problems_setup.py | Python | agpl-3.0 | 17,403 |
import pyopus.wxmplplot as pyopl
from numpy import arange, sin, cos, exp, pi, e, linspace, outer, ones, size
if __name__ == '__main__':
    # Initialize gui thread, clean up.
    pyopl.init()
    pyopl.close()
    # Plot data - sin(x), cos(x), exp(x/pi) .. for x in [0, 2pi] with 0.2 step.
    # NOTE(review): y1/y2/y3 are computed but never plotted below -- the demo
    # only draws the 3d surface; confirm whether these are leftovers.
    x = arange(0.0, 2*pi, 0.2)
    y1 = sin(x)
    y2 = cos(x)
    y3 = exp(x/pi)
    # Create first figure (plot window). This is now the active figure.
    # Tag is assigned automatically by the system.
    f1=pyopl.figure(windowTitle="Figure - single axes", figpx=(600,400), dpi=100)
    # Lock the main GUI event loop. This implicitly disables repainting.
    pyopl.lock(True)
    # If the window is closed the C++ part of the panel object is deleted,
    # but the wxPython wrapper is still around. Accessing any attribute then
    # results in an exception. To check if the C++ part is still there, call
    # the alive() function with figure as argument.
    if pyopl.alive(f1):
        # Draw a radius-10 sphere via the standard spherical-coordinate
        # parametrisation (outer products of cos/sin over u, v grids).
        ax = f1.add_subplot(1, 1, 1, projection='3d')
        u = linspace(0, 2 * pi, 100)
        v = linspace(0, pi, 100)
        x = 10 * outer(cos(u), sin(v))
        y = 10 * outer(sin(u), sin(v))
        z = 10 * outer(ones(size(u)), cos(v))
        ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b')
        # Paint the changes on the screen.
        pyopl.draw(f1)
    # Now unlock the main GUI event loop
    pyopl.lock(False)
    # Handle keyboard interrupts properly.
    pyopl.join()
| blorgon9000/pyopus | demo/plotting/10-3d.py | Python | gpl-3.0 | 1,443 |
import os
import sys
above_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, above_dir)
from tools import ProgressIterator, random_target_function, random_set, second_order, pla, linear_percepton, weight_error, output, experiment
import numpy as np
np.random.seed(0)
# HOEFFDING INEQUALITY
def coin_data(num_trials, sample_size, num_flips):
    """Simulate fair coin flips: (num_trials, sample_size, num_flips) array of 0/1."""
    shape = (num_trials, sample_size, num_flips)
    return np.random.randint(0, 2, size=shape)
def collate_flip_results(coin_data):
    """Count heads per (trial, coin) by summing over the flips axis."""
    head_counts = np.sum(coin_data, axis=2)
    return head_counts
def experiment_results(collated_flips):
    """Average head-fraction of the first, a randomly picked, and the minimum coin.

    The /10 normalisation assumes each coin was flipped 10 times.
    """
    avg_v1 = np.average(collated_flips[:, 0]) / 10
    random_samples = []
    for trial in collated_flips:
        pick = np.random.randint(len(trial))
        random_samples.append(trial[pick])
    avg_vrand = np.average(random_samples) / 10
    avg_vmin = np.average(np.amin(collated_flips, axis=1)) / 10
    return avg_v1, avg_vrand, avg_vmin
def test_one():
    """Hoeffding experiment: 100000 trials of 1000 coins flipped 10 times each."""
    flips = coin_data(100000, 1000, 10)
    head_counts = collate_flip_results(flips)
    return experiment_results(head_counts)
# LINEAR REGRESSION
def test_two(in_sample, out_sample):
    """Linear regression: return (in-sample, out-of-sample) classification errors."""
    target = random_target_function()
    train = random_set(in_sample, target)
    w = linear_percepton(train.z, train.y)
    e_in = weight_error(w, train.z, train.y)
    test = random_set(out_sample, target)
    e_out = weight_error(w, test.z, test.y)
    return e_in, e_out
def test_three(in_sample):
    """Return the number of PLA iterations needed on a random data set."""
    target = random_target_function()
    train = random_set(in_sample, target)
    result = pla(train.z, train.y, return_iterations=True)
    return result[1]
# NONLINEAR TRANSFORMATION
def moved_circle(data_point):
    """Noiseless target: sign of x1^2 + x2^2 - 0.6 (index 0 is the bias term)."""
    radius_sq = data_point[1] ** 2 + data_point[2] ** 2
    return -1 if radius_sq < 0.6 else 1
def test_four(in_sample, out_sample):
    """Nonlinear-transform experiment with 10% label noise.

    Returns (in-sample error without transform, transformed weights,
    out-of-sample error with transform).
    """
    train = random_set(in_sample, moved_circle)
    noisy = np.random.choice(in_sample, size=round(0.1 * in_sample), replace=False)
    train.y[noisy] *= -1
    w = linear_percepton(train.z, train.y)
    in_error_no_transform = weight_error(w, train.z, train.y)
    # Refit in the second-order feature space.
    train.z = second_order(train.z)
    w = linear_percepton(train.z, train.y)
    in_error_transform = weight_error(w, train.z, train.y)
    test = random_set(out_sample, moved_circle, second_order)
    noisy = np.random.choice(out_sample, size=round(0.1 * out_sample), replace=False)
    test.y[noisy] *= -1
    out_error_transform = weight_error(w, test.z, test.y)
    return in_error_no_transform, w, out_error_transform
def main():
    """Run all homework simulations and print their results."""
    print("The following simulations are computationally intensive")
    # NOTE(review): the callable itself is passed, not its result --
    # output() presumably invokes it; confirm against tools.output.
    output(simulations)
def simulations():
    """Run the four experiments; return printable results keyed by question number."""
    que ={}
    # Progress bar over the four experiment groups.
    progress_iterator = ProgressIterator(4)
    progress_iterator.next()
    # Hoeffding coin-flip experiment.
    avg_v1, avg_vrand, avg_vmin = test_one()
    que[1] = ("v min :", avg_vmin)
    progress_iterator.next()
    # Linear regression in/out-of-sample errors, averaged over 1000 runs.
    in_error, out_error = experiment(test_two, [100, 1000], 1000)
    que[5] = ("in sample error :", in_error)
    que[6] = ("out sample error :", out_error)
    progress_iterator.next()
    # PLA convergence iterations.
    iterations = experiment(test_three, [10], 1000)
    que[7] = ("iterations :", iterations)
    progress_iterator.next()
    # Nonlinear transformation experiment, averaged over 1000 runs.
    # dtype=object because each row mixes scalars with a weight vector.
    results = np.array([ test_four(100, 1000) for _ in range(1000) ], dtype=object)
    in_error_no_transform = np.mean(results[:,0])
    weight = np.mean(results[:,1], axis=0)
    out_error_transform = np.mean(results[:,2])
    que[8] = ("in sample error -- without higher dimension transformation :",
              in_error_no_transform)
    que[9] = ("higher dimensional weights :", weight)
    que[10] = ("out of sample error -- with higher dimension transformation :",
               out_error_transform)
    return que
# Answer key for the homework's multiple-choice questions (question -> letter).
ans = {
    1 : 'b',
    2 : 'd',
    3 : 'e',
    4 : 'b',
    5 : 'c',
    6 : 'c',
    7 : 'a',
    8 : 'd',
    9 : 'a',
    10 : 'b',
}
if __name__ == '__main__':
    main()
| zhiyanfoo/caltech-machine-learning | ass2/hw2.py | Python | mit | 4,126 |
"""
Clone an undirected graph. Each node in the graph contains a label and a list of its neighbors.
OJ's undirected graph serialization:
Nodes are labeled uniquely.
We use # as a separator for each node, and , as a separator for node label and each neighbor of the node.
As an example, consider the serialized graph {0,1,2#1,2#2,2}.
The graph has a total of three nodes, and therefore contains three parts as separated by #.
First node is labeled as 0. Connect node 0 to both nodes 1 and 2.
Second node is labeled as 1. Connect node 1 to node 2.
Third node is labeled as 2. Connect node 2 to node 2 (itself), thus forming a self-cycle.
Visually, the graph looks like the following:
1
/ \
/ \
0 --- 2
/ \
\_/
"""
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    # @param node, a undirected graph node
    # @return a undirected graph node
    def cloneGraph(self, node):
        """Deep-copy an undirected graph via DFS, memoising clones by label.

        Returns the clone of `node`, or None for an empty graph.
        Everything that previously followed `return dfs(node)` was an
        abandoned iterative version and unreachable; it has been removed.
        """
        visited = {}  # label -> cloned node; also breaks cycles

        def dfs(root):
            # Return the clone of `root`, creating it (and its neighbors)
            # on demand; reuse an existing clone to preserve shared edges.
            if not root:
                return None
            if root.label in visited:
                return visited[root.label]
            clone = UndirectedGraphNode(root.label)
            visited[root.label] = clone
            for n in root.neighbors:
                clone.neighbors.append(dfs(n))
            return clone

        return dfs(node)
| dichen001/Go4Jobs | JackChen/Google/133. Clone Graph.py | Python | gpl-3.0 | 1,949 |
#!/usr/bin/env python
import argparse
from collections import defaultdict
from itertools import combinations
import os
import datetime
import time
import random
import numpy as np
from scipy.optimize import minimize, check_grad
import networkx as nx
from lib.graph import make_edge, read_graph_from_file, cosh_d, distance, grad_cosh_d, fringe
from lib.pair_generators import BinaryPairGenerator
from lib.embedding_models import PoincareModel
from lib.loss_functions import MSE, LogLoss
from lib.optimization import SGD
class Margin:
    """Hyperbolic distance between two polar points, shifted by the threshold R."""

    def __init__(self, R):
        self.R = R
        self.coshR = np.cosh(R)

    def __call__(self, r1, phi1, r2, phi2):
        cosh_dist = cosh_d((r1, phi1), (r2, phi2))
        return np.arccosh(cosh_dist) - self.R
class GradMargin(Margin):
    """Gradient of the margin w.r.t. (r1, phi1, r2, phi2)."""

    def __call__(self, r1, phi1, r2, phi2):
        cosh_dist = cosh_d((r1, phi1), (r2, phi2))
        # At zero distance (cosh == 1) arccosh is not differentiable; return 0.
        if abs(cosh_dist - 1.) < 1e-15:
            return np.array((0., 0., 0., 0.))
        # d arccosh(u) = du / (sqrt(u - 1) * sqrt(u + 1))
        grad = grad_cosh_d((r1, phi1), (r2, phi2))
        return grad / np.sqrt(cosh_dist - 1) / np.sqrt(cosh_dist + 1)
class Smooth:
    """Logistic approximation of a step function of the margin.

    Also serves as the model of edge probability.
    """

    def __init__(self, beta=1., height=1.):
        self.height = height
        self.beta = beta

    def __call__(self, margin):
        denom = 1. + np.exp(margin * self.beta)
        return 1 / denom * self.height
class GradSmooth(Smooth):
    """Derivative of the logistic step approximation w.r.t. the margin."""

    def __call__(self, margin):
        e = np.exp(margin * self.beta)
        return -e / (1. + e)**2 * self.height * self.beta
class Q:
    "loss function to minimize"
    def __init__(self, vertices, edges, nedges):
        self.vertices = vertices
        self.edges = edges
        self.nedges = nedges
        n = len(vertices)
        assert n > 1
        # Target disk radius of the hyperbolic embedding: R = 2 ln n.
        R = 2 * np.log(n)
        self.R = R
        self.coshR = np.cosh(R)
        # Reweight non-edge terms so both classes contribute comparably.
        self.non_edge_weight = float(len(self.edges)) / len(self.nedges) if len(self.nedges) else 1.
        self.margin = Margin(R)
        self.grad_margin = GradMargin(R)
        beta = 1.
        self.smooth = Smooth(beta=beta)
        self.grad_smooth = GradSmooth(beta=beta)
    def __value_term(self, x, v1, v2, true_edge):
        # Weighted squared-error contribution of a single vertex pair.
        # NOTE(review): list.index() makes each term O(|V|); a precomputed
        # vertex->index dict would be much faster -- verify before changing.
        i1 = self.vertices.index(v1)
        i2 = self.vertices.index(v2)
        r1 = x[2*i1]
        phi1 = x[2*i1+1]
        r2 = x[2*i2]
        phi2 = x[2*i2+1]
        z = self.margin(r1, phi1, r2, phi2)
        pred_edge = self.smooth(z)
        w = 1. if true_edge else self.non_edge_weight
        return (pred_edge - true_edge)**2 * w
    def __call__(self, x):
        "x = [r1, phi1, r2, phi2, ...] for vertex sequence v1, v2, ..."
        value = 0.
        assert len(x) % 2 == 0
        # Sum over positive pairs (edges, target 1) and negatives (target 0).
        for (v1, v2) in self.edges:
            value += self.__value_term(x, v1, v2, 1.)
        for (v1, v2) in self.nedges:
            value += self.__value_term(x, v1, v2, 0.)
        return value
class GradQ(Q):
    """Gradient of the loss Q w.r.t. the flat coordinate vector x.

    Note: __grad_terms is name-mangled to _GradQ__grad_terms, so it does not
    collide with Q's private _Q__value_term.
    """
    def __grad_terms(self, x, i1, i2, true_edge):
        # Chain rule for d/dx [ (smooth(margin) - t)^2 * w ] of one pair.
        r1 = x[2*i1]
        phi1 = x[2*i1+1]
        r2 = x[2*i2]
        phi2 = x[2*i2+1]
        z = self.margin(r1, phi1, r2, phi2)
        smooth_der = self.grad_smooth(z)
        margin_der = self.grad_margin(r1, phi1, r2, phi2)
        # NOTE(review): v1/v2 below are computed but never used.
        v1 = self.vertices[i1]
        v2 = self.vertices[i2]
        w = 1. if true_edge else self.non_edge_weight
        disc = 2 * (self.smooth(z) - true_edge) * w
        return disc * smooth_der * margin_der
    def __call__(self, x):
        # Accumulate per-pair gradients into the full coordinate vector.
        assert len(x) % 2 == 0
        value = np.zeros(len(x))
        for (v1, v2) in self.edges:
            i1 = self.vertices.index(v1)
            i2 = self.vertices.index(v2)
            v = self.__grad_terms(x, i1, i2, 1.)
            value[2*i1] += v[0] # r1
            value[2*i1+1] += v[1] # phi1
            value[2*i2] += v[2] # r2
            value[2*i2+1] += v[3] # phi2
        for (v1, v2) in self.nedges:
            i1 = self.vertices.index(v1)
            i2 = self.vertices.index(v2)
            v = self.__grad_terms(x, i1, i2, 0.)
            value[2*i1] += v[0] # r1
            value[2*i1+1] += v[1] # phi1
            value[2*i2] += v[2] # r2
            value[2*i2+1] += v[3] # phi2
        return value
    def vertex_pair_grad(self, x, v1, v2, is_true_edge):
        # Gradient contribution of a single pair (for SGD-style updates);
        # all other coordinates stay zero.
        assert len(x) % 2 == 0
        value = np.zeros(len(x))
        i1 = self.vertices.index(v1)
        i2 = self.vertices.index(v2)
        edge_ind = 1. if is_true_edge else 0.
        v = self.__grad_terms(x, i1, i2, edge_ind)
        value[2*i1] = v[0] # r1
        value[2*i1+1] = v[1] # phi1
        value[2*i2] = v[2] # r2
        value[2*i2+1] = v[3] # phi2
        return value
def find_embeddings(vertices, edges, mode,
                    learning_rate=0.1, n_epoch=100,
                    ratio_to_second=2., ratio_between_first=1., ratio_random=1.,
                    silent=False):
    "find (r, phi) for each vertex"
    # NOTE(review): return type depends on mode -- 'random', 'degrees' and the
    # BFGS 'fit*' branch return a bare {vertex: (r, phi)} dict, while
    # 'fit_degrees_sgd' returns (embeddings, info) -- main() unpacks two
    # values, so the other modes would crash there; confirm intended usage.
    vertices = list(vertices)
    n = len(vertices)
    R = 2 * np.log(n)
    print "mode: {}".format(mode)
    np.random.seed(0)
    degrees = defaultdict(int)
    print "count degrees"
    for v1, v2 in edges:
        degrees[v1] += 1
        degrees[v2] += 1
    if mode=='random':
        # phi=rand(0, 2pi), r = rand(0,R)
        return {v: (np.random.uniform(0.0, R), np.random.uniform(0.0, 2*np.pi)) for v in vertices}
    elif mode == 'degrees':
        # phi=rand(0,2pi), r = 2log(n/k)
        return {v: (2*np.log(n / degrees[v]), np.random.uniform(0.0, 2*np.pi)) for v in vertices}
    elif mode.startswith('fit'):
        # Initial point: degree-based radii with random angles, flattened
        # into [r1, phi1, r2, phi2, ...].
        x0 = []
        for (r, phi) in zip([2*np.log(n / degrees[v]) for v in vertices], [np.random.uniform(0.0, 2*np.pi) for v in vertices]):
            x0.append(r)
            x0.append(phi)
        x0 = np.array(x0)
        # Build the set of negative (non-edge) pairs used by the loss.
        nedges = set()
        all_nedges = set()
        for (v1, v2) in combinations(vertices, 2):
            #if (v1, v2) not in edges and (v2, v1) not in edges:
            e = make_edge(v1, v2)
            if e not in edges:
                all_nedges.add(e)
        if mode == 'fit_random':
            # Sample as many random non-edges as there are edges.
            a = list(all_nedges)
            random.shuffle(a)
            nedges = set(a[:len(edges)])
        elif mode == 'fit_degrees':
            # Degree-proportional negative sampling around each vertex.
            K = float(ratio_to_second) # ratio of nedges to second neighbour
            L = float(ratio_between_first) # ratio of nedges between first neighbours
            M = float(ratio_random) # ratio of random nedges
            #free_nedges = all_nedges.copy()
            G = nx.Graph()
            G.add_edges_from(edges)
            srt_vertices = sorted(degrees.keys(), key=lambda v: -degrees[v])
            shuf_vertices = srt_vertices[:]
            random.shuffle(shuf_vertices)
            for v in srt_vertices:
                # get first neighbours
                first_neigh = set(G.neighbors(v))
                # get second neighbours
                second_neigh = set()
                for neigh in first_neigh:
                    second_neigh.update(G.neighbors(neigh))
                second_neigh.remove(v)
                n_vertex_nedges = 0
                # from v to second neighbours
                for i, sec_n in enumerate(second_neigh):
                    #print "i: {}".format(i)
                    if i+1 > degrees[v] * K:
                        continue
                    e = make_edge(v, sec_n)
                    if e not in nedges:
                        nedges.add(e)
                        n_vertex_nedges += 1
                # between first neighbours
                for j, pair in enumerate(combinations(first_neigh, 2)):
                    #print "j: {}".format(j)
                    if j+1 > degrees[v] * L:
                        continue
                    v1, v2 = pair
                    e = make_edge(v1, v2)
                    if e not in nedges:
                        nedges.add(e)
                # random edges
                max_n_random_vertices = int(degrees[v]*M)
                n_random_vertices = 0
                for rand_v in shuf_vertices:
                    if n_random_vertices >= max_n_random_vertices:
                        break
                    e = make_edge(v, rand_v)
                    if e not in nedges and e not in edges:
                        nedges.add(e)
                        n_random_vertices += 1
        else:
            # Plain 'fit' / 'fit_degrees_sgd': keep every non-edge.
            nedges = all_nedges.copy()
        print "number of nedges={}".format(len(nedges))
        q = Q(vertices, edges, nedges)
        grad_q = GradQ(vertices, edges, nedges)
        if mode == 'fit_degrees_sgd':
            print "Learning rate: {}".format(learning_rate)
            print "Ratio to second: {}".format(ratio_to_second)
            print "Ratio between first: {}".format(ratio_between_first)
            print "Ratio random: {}".format(ratio_random)
            G = nx.Graph()
            G.add_edges_from(edges)
            # construct connected(!) core
            core_exponent = 0.4
            core_vertices, fringe_vertices = [], []
            # one-pass split by condition
            for v in vertices:
                core_vertices.append(v) if degrees[v] >= n**core_exponent else fringe_vertices.append(v)
            # add vertices to ensure connectivity of core
            fringe_vertices.sort(key=lambda v: -degrees[v])
            while not nx.is_connected(G.subgraph(core_vertices)):
                core_vertices.append(fringe_vertices.pop(0))
            print "Core size: {}".format(len(core_vertices))
            G_core = G.subgraph(core_vertices)
            print "Is core connected:", nx.is_connected(G_core)
            #loss_function = MSE(binary_edges=True)
            loss_function = LogLoss(binary_edges=True)
            optimizer = SGD(n_epoch=n_epoch, learning_rate=learning_rate, verbose=not silent)
            # Grow the embedded subgraph outward from the core, freezing the
            # already-embedded vertices at each expansion step.
            FRINGE_FRACTION = 0.1
            max_fringe_size = int(G.number_of_nodes() * FRINGE_FRACTION)
            curr_graph = G.subgraph(core_vertices)
            curr_core_vertices = set(core_vertices)
            curr_embedding_model = PoincareModel(curr_graph, fit_radius=False)
            curr_pair_generator = BinaryPairGenerator(curr_graph, batch_size=1)
            optimizer.optimize_embedding(curr_embedding_model, loss_function, curr_pair_generator)
            for i in range(int(1/FRINGE_FRACTION)+1):
                total_fringe = fringe(G, curr_core_vertices)
                #print "DEBUG:", curr_graph. number_of_nodes(), len(curr_core_vertices), len(total_fringe)
                fringe_vertices = set(sorted(total_fringe, key=lambda v: -G.degree(v))[:max_fringe_size])
                #print "DEBUG:", i+1, fringe_vertices
                if not fringe_vertices:
                    break
                curr_graph = G.subgraph(curr_core_vertices | fringe_vertices)
                curr_embedding_model = PoincareModel(curr_graph, fit_radius=False, init_embedding=curr_embedding_model)
                curr_pair_generator = BinaryPairGenerator(curr_graph, batch_size=1)
                optimizer.optimize_embedding(curr_embedding_model, loss_function, curr_pair_generator, fixed_vertices=curr_core_vertices)
                curr_core_vertices |= fringe_vertices
            embedding_model = curr_embedding_model
            '''
            core_embedding_model = PoincareModel(G_core, fit_radius=False)
            core_pair_generator = BinaryPairGenerator(G_core, batch_size=1)
            optimizer.optimize_embedding(core_embedding_model, loss_function, core_pair_generator)
            #optimizer = SGD(n_epoch=n_epoch, learning_rate=learning_rate, verbose=not silent)
            embedding_model = PoincareModel(G, fit_radius=False, init_embedding=core_embedding_model)
            pair_generator = BinaryPairGenerator(G, batch_size=1)
            optimizer.optimize_embedding(embedding_model, loss_function, pair_generator, fixed_vertices=core_vertices)
            #print "Radius before: {}".format(embedding_model.embedding['radius'])
            #print "Radius after: {}".format(embedding_model.embedding['radius'])
            '''
            return (embedding_model.embedding['vertices'], {'core': list(G.edges())})
        else:
            # Full-batch BFGS on the analytic loss/gradient.
            print "Check gradient: ", check_grad(q, grad_q, x0)
            res = minimize(q, x0, method='BFGS', jac=grad_q)
            #print res
            x = res.x
            retval = {}
            for i in range(len(vertices)):
                r = x[2*i]
                phi = x[2*i+1]
                retval[vertices[i]] = (r, phi)
            return retval
    else:
        raise Exception('unknown mode')
def main():
    """CLI entry point: read a graph, fit embeddings, write result files.

    Writes <out_prefix>-embeddings.txt (vertex, r, phi per line) and, when
    the chosen mode reports a core, <out_prefix>-core.txt (edge per line).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('graph_file')
    parser.add_argument('out_prefix')
    parser.add_argument('--mode', default='fit_degrees_sgd', help='random|degrees|fit|fit_random|fit_degrees|fit_degrees_sgd')
    parser.add_argument('--learning-rate', default=0.1, help='learning rate for fit_degrees_sgd', type=float)
    parser.add_argument('--n-epoch', default=100, help='number of training epoch for fit_degrees_sgd', type=int)
    parser.add_argument('--ratio-to-second', default=2., help='ratio of nedges to second neighbour', type=float)
    parser.add_argument('--ratio-between-first', default=1., help='ratio of nedges between first neighbours', type=float)
    parser.add_argument('--ratio-random', default=1., help='ratio of random nedges', type=float)
    parser.add_argument('--silent', action='store_true')
    args = parser.parse_args()
    vertices, edges = read_graph_from_file(args.graph_file)
    n = len(vertices)
    print "Number of vertices: {}".format(n)
    print "Number of edges: {}".format(len(edges))
    print "Number of non-edges: {}".format(n*(n-1)/2 - len(edges))
    print "Find embeddings"
    # NOTE(review): this two-value unpack only matches the 'fit_degrees_sgd'
    # return shape; other modes return a bare dict -- confirm.
    embeddings, info = find_embeddings(vertices, edges, mode=args.mode,
        learning_rate=args.learning_rate, n_epoch=args.n_epoch,
        ratio_to_second=args.ratio_to_second, ratio_between_first=args.ratio_between_first, ratio_random=args.ratio_random,
        silent=args.silent
    )
    with open(args.out_prefix+'-embeddings.txt', 'w') as of:
        for v in embeddings.keys():
            r, phi = embeddings[v]
            of.write('\t'.join(map(str, [v, r, phi]))+'\n')
    core = info['core']
    if core is not None:
        with open(args.out_prefix+'-core.txt', 'w') as of_core:
            for v1, v2 in core:
                of_core.write('\t'.join(map(str, [v1, v2]))+'\n')
if __name__ == '__main__':
    main()
| horoshenkih/rgg | fit.py | Python | mit | 14,528 |
from vmwaremirage import VmwareMirageClient
import config
import config_secure
import os
# Shared client used by every test below; the password comes from the
# environment so it is never committed alongside the config files.
vm = VmwareMirageClient(server=config_secure.server,
                        username=config_secure.username,
                        password=os.environ['VMWARE_MIRAGE_PASSWORD'])
def test_reauth():
    """The client should transparently re-authenticate after a logout."""
    # Confirm we are working
    cvd = vm.get_cvd(config.cvd_1['id'])
    assert cvd.Name == config.cvd_1['name']
    # Logout
    vm.client.service.Logout()
    # And try again. It should automatically re-authenticate
    cvd = vm.get_cvd(config.cvd_1['id'])
    assert cvd.Name == config.cvd_1['name']
def test_get_cvds():
    """Exercise get_cvd/get_cvds across every supported query field."""
    # Test the by id function
    cvd = vm.get_cvd(config.cvd_1['id'])
    assert cvd.Name == config.cvd_1['name']
    # Test getting two cvds by id
    cvds = vm.get_cvds(by='ID', value=[config.cvd_1['id'],config.cvd_2['id']], query_type='EQUALS')
    assert len(cvds) == 2
    cvds = vm.get_cvds(by='DEVICE_ID', value=[config.cvd_1['deviceid'],config.cvd_2['deviceid']], query_type='EQUALS')
    assert len(cvds) == 2
    cvds = vm.get_cvds(by='POLICY_ID', value=config.cvd_1['policyid'], query_type='EQUALS')
    assert len(cvds) >= 1
    cvds = vm.get_cvds(by='NAME', value=config.cvd_1['name'])
    assert len(cvds) == 1
    cvds = vm.get_cvds(by='USER_NAME', value=config.cvd_1['username'], query_type='CONTAINS')
    assert len(cvds) >= 1
    cvds = vm.get_cvds(by='POLICY_NAME', value=config.cvd_1['policyname'], query_type='ENDS_WITH')
    assert len(cvds) >= 1
    cvds = vm.get_cvds(by='CONNECTION_STATE', value=False, query_type='EQUALS')
    assert len(cvds) >= 1
    cvds = vm.get_cvds(by='CLIENT_STATUS', value='Idle', query_type='EQUALS')
    assert len(cvds) >= 1
    cvds = vm.get_cvds(by='PROGRESS', value=100, query_type='NOT_EQUALS')
    assert len(cvds) >= 1
def test_get_collection_cvds():
    """A known collection should contain at least one CVD."""
    cvds = vm.get_collection_cvds(config.collection['id'])
    assert len(cvds) >= 1
def test_get_app_layers():
    """App layers are listable and queryable by ID and NAME."""
    layers = vm.get_app_layers()
    assert len(layers) >= 1
    layer = vm.get_app_layers(by='ID', value=config.app_layer['id'], query_type='EQUALS')[0]
    assert layer.Name == config.app_layer['name']
    layers = vm.get_app_layers(by='NAME', value=config.app_layer['name'])
    assert len(layers) >= 1
def test_get_base_layers():
    """Base layers are listable and queryable by ID and NAME."""
    layers = vm.get_base_layers()
    assert len(layers) >= 1
    layer = vm.get_base_layers(by='ID', value=config.base_layer['id'], query_type='EQUALS')[0]
    assert layer.Name == config.base_layer['name']
    layers = vm.get_base_layers(by='NAME', value=config.base_layer['name'])
    assert len(layers) >= 1
def test_get_collections():
    """Collections are queryable by ID, NAME and DESCRIPTION."""
    colls = vm.get_collections(by='ID', value=config.collection['id'], query_type='EQUALS')
    assert len(colls) == 1
    colls = vm.get_collections(by='NAME', value=config.collection['name'])
    assert len(colls) >= 1
    colls = vm.get_collections(by='DESCRIPTION', value=config.collection['description'], query_type='CONTAINS')
    assert len(colls) >= 1
def test_get_pending_devices():
    """Pending devices are queryable across every supported field."""
    pends = vm.get_pending_devices(by='DEVICE_ID', value=config.pending['deviceid'], query_type='EQUALS')
    assert len(pends) == 1
    pends = vm.get_pending_devices(by='NAME', value=config.pending['name'])
    assert len(pends) == 1
    pends = vm.get_pending_devices(by='USER_NAME', value=config.pending['username'], query_type='CONTAINS')
    assert len(pends) >= 1
    pends = vm.get_pending_devices(by='CONNECTION_STATE', value=False, query_type='EQUALS')
    assert len(pends) >= 1
    pends = vm.get_pending_devices(by='MODEL_NAME', value=config.pending['model'], query_type='EQUALS')
    assert len(pends) >= 1
    pends = vm.get_pending_devices(by='VENDOR_NAME', value=config.pending['vendor'], query_type='EQUALS')
    assert len(pends) >= 1
    pends = vm.get_pending_devices(by='OS_VERSION', value=config.pending['os'], query_type='EQUALS')
    assert len(pends) >= 1
def test_get_policies():
    """Policies are queryable by ID and NAME."""
    pols = vm.get_policies(by='ID', value=config.policy['id'], query_type='EQUALS')
    assert len(pols) == 1
    pols = vm.get_policies(by='NAME', value=config.policy['name'], query_type='EQUALS')
    assert len(pols) == 1
def test_get_volumes():
vols = vm.get_volumes(by='ID', value=config.volume['id'], query_type='EQUALS')
assert len(vols) == 1
vols = vm.get_volumes(by='NAME', value=config.volume['name'], query_type='EQUALS')
assert len(vols) == 1
vols = vm.get_volumes(by='PATH', value=config.volume['path'], query_type='EQUALS')
| jay-tuckey/python-vmwaremirage | tests/test_main_functions.py | Python | mit | 4,514 |
# jhbuild - a build script for GNOME 1.x and 2.x
# Copyright (C) 2001-2006 James Henstridge
#
# httpcache.py: a simple HTTP cache
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''Very simple minded class that can be used to maintain a cache of files
downloaded from web servers. It is designed to reduce load on web servers,
and draws ideas from feedparser.py. Strategies include:
- If a resource has been checked in the last 6 hours, consider it current.
- support gzip transfer encoding.
- send If-Modified-Since and If-None-Match headers when validating a
resource to reduce downloads when the file has not changed.
- honour Expires headers returned by server. If no expiry time is
given, it defaults to 6 hours.
'''
import os
import sys
import urllib2
import urlparse
import time
import rfc822
import StringIO
try:
import gzip
except ImportError:
gzip = None
try:
import xml.dom.minidom
except ImportError:
raise SystemExit, _('Python xml packages are required but could not be found')
def _parse_isotime(string):
if string[-1] != 'Z':
return time.mktime(time.strptime(string, '%Y-%m-%dT%H:%M:%S'))
tm = time.strptime(string, '%Y-%m-%dT%H:%M:%SZ')
return time.mktime(tm[:8] + (0,)) - time.timezone
def _format_isotime(tm):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(tm))
def _parse_date(date):
    """Parse an RFC 822 date header to an epoch timestamp; 0 when unparsable."""
    parsed = rfc822.parsedate_tz(date)
    if not parsed:
        return 0
    return rfc822.mktime_tz(parsed)
class CacheEntry:
    """One cached resource: its URI, local file name, validators and expiry."""

    def __init__(self, uri, local, modified, etag, expires=0):
        self.expires = expires
        self.etag = etag
        self.modified = modified
        self.local = local
        self.uri = uri
class Cache:
    """On-disk HTTP cache with an XML index, conditional GETs and expiry."""
    # Cache directory: $XDG_CACHE_HOME/jhbuild, else ~/.cache/jhbuild.
    try:
        cachedir = os.path.join(os.environ['XDG_CACHE_HOME'], 'jhbuild')
    except KeyError:
        cachedir = os.path.join(os.environ['HOME'], '.cache','jhbuild')
    # default to a 6 hour expiry time.
    default_age = 6 * 60 * 60
    def __init__(self, cachedir=None):
        # An explicit cachedir overrides the class-level default.
        if cachedir:
            self.cachedir = cachedir
        if not os.path.exists(self.cachedir):
            os.makedirs(self.cachedir)
        self.entries = {}
    def read_cache(self):
        """Reload self.entries from index.xml; tolerate a missing/bad index."""
        self.entries = {}
        cindex = os.path.join(self.cachedir, 'index.xml')
        try:
            document = xml.dom.minidom.parse(cindex)
        except:
            return # treat like an empty cache
        if document.documentElement.nodeName != 'cache':
            document.unlink()
            return # doesn't look like a cache
        for node in document.documentElement.childNodes:
            if node.nodeType != node.ELEMENT_NODE: continue
            if node.nodeName != 'entry': continue
            uri = node.getAttribute('uri')
            local = str(node.getAttribute('local'))
            # 'modified' and 'etag' validators are optional attributes.
            if node.hasAttribute('modified'):
                modified = node.getAttribute('modified')
            else:
                modified = None
            if node.hasAttribute('etag'):
                etag = node.getAttribute('etag')
            else:
                etag = None
            expires = _parse_isotime(node.getAttribute('expires'))
            # only add to cache list if file actually exists.
            if os.path.exists(os.path.join(self.cachedir, local)):
                self.entries[uri] = CacheEntry(uri, local, modified,
                                               etag, expires)
        document.unlink()
    def write_cache(self):
        """Serialise self.entries back to index.xml."""
        cindex = os.path.join(self.cachedir, 'index.xml')
        document = xml.dom.minidom.Document()
        document.appendChild(document.createElement('cache'))
        node = document.createTextNode('\n')
        document.documentElement.appendChild(node)
        for uri in self.entries.keys():
            entry = self.entries[uri]
            node = document.createElement('entry')
            node.setAttribute('uri', entry.uri)
            node.setAttribute('local', entry.local)
            if entry.modified:
                node.setAttribute('modified', entry.modified)
            if entry.etag:
                node.setAttribute('etag', entry.etag)
            node.setAttribute('expires', _format_isotime(entry.expires))
            document.documentElement.appendChild(node)
            # Newline text nodes keep the index human-readable.
            node = document.createTextNode('\n')
            document.documentElement.appendChild(node)
        document.writexml(open(cindex, 'w'))
        document.unlink()
    def _make_filename(self, uri):
        '''picks a unique name for a new entry in the cache.
        Very simplistic.'''
        # get the basename from the URI
        parts = urlparse.urlparse(uri, allow_fragments=False)
        base = parts[2].split('/')[-1]
        if not base: base = 'index.html'
        # Append '-' until the name collides with no existing entry.
        is_unique = False
        while not is_unique:
            is_unique = True
            for uri in self.entries.keys():
                if self.entries[uri].local == base:
                    is_unique = False
                    break
            if not is_unique:
                base = base + '-'
        return base
    def load(self, uri, nonetwork=False, age=None):
        '''Downloads the file associated with the URI, and returns a local
        file name for contents.'''
        # pass file URIs straight through -- no need to cache them
        parts = urlparse.urlparse(uri)
        if parts[0] in ('', 'file'):
            return parts[2]
        if sys.platform.startswith('win') and uri[1] == ':':
            # On Windows, path like c:... are local
            return uri
        now = time.time()
        # is the file cached and not expired?
        self.read_cache()
        entry = self.entries.get(uri)
        # age == 0 forces a revalidation unless the network is off-limits.
        if entry and (age != 0 or nonetwork):
            if (nonetwork or now <= entry.expires):
                return os.path.join(self.cachedir, entry.local)
        if nonetwork:
            raise RuntimeError(_('file not in cache, but not allowed to check network'))
        request = urllib2.Request(uri)
        if gzip:
            request.add_header('Accept-encoding', 'gzip')
        # Send validators so the server can answer 304 Not Modified.
        if entry:
            if entry.modified:
                request.add_header('If-Modified-Since', entry.modified)
            if entry.etag:
                request.add_header('If-None-Match', entry.etag)
        try:
            response = urllib2.urlopen(request)
            # get data, and gunzip it if it is encoded
            data = response.read()
            if gzip and response.headers.get('Content-Encoding', '') == 'gzip':
                try:
                    data = gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
                except:
                    # NOTE(review): a corrupt gzip body is silently replaced
                    # by empty content -- confirm this is intentional.
                    data = ''
            expires = response.headers.get('Expires')
            # add new content to cache
            entry = CacheEntry(uri, self._make_filename(uri),
                               response.headers.get('Last-Modified'),
                               response.headers.get('ETag'))
            filename = os.path.join(self.cachedir, entry.local)
            open(filename, 'wb').write(data)
        except urllib2.HTTPError, e:
            if e.code == 304: # not modified; update validated
                expires = e.hdrs.get('Expires')
                filename = os.path.join(self.cachedir, entry.local)
            else:
                raise
        # set expiry date
        entry.expires = _parse_date(expires)
        if entry.expires <= now: # ignore expiry times that have already passed
            if age is None:
                age = self.default_age
            entry.expires = now + age
        # save cache
        self.entries[uri] = entry
        self.write_cache()
        return filename
_cache = None

def load(uri, nonetwork=False, age=None):
    '''Downloads the file associated with the URI, and returns a local
    file name for contents.'''
    global _cache
    # Lazily create the shared module-level cache on first use.
    if _cache is None:
        _cache = Cache()
    return _cache.load(uri, nonetwork=nonetwork, age=age)
| rpavlik/jhbuild-vrjuggler | jhbuild/utils/httpcache.py | Python | gpl-2.0 | 8,664 |
"""bill do the dan flan dance"""
from urllib import quote
import re
import requests
from random import shuffle
def gif(unsafe=False):
    """Return the URL of the Dan image (`unsafe` is accepted but ignored)."""
    return "https://raw.githubusercontent.com/BobbyJohansen/imgs/master/dan.png"

def on_message(msg, server):
    """Reply with the Dan image when a message asks bill to do the dan flan dance.

    Returns the image URL on a match, otherwise None.  The original bound
    the (unused) matched text and used re.findall; a boolean re.search is
    all that is needed here.
    """
    text = msg.get("text", "")
    if not re.search(r"bill do the dan flan dance", text):
        return None
    return gif()
| BobbyJohansen/BillBot | plugins/thedanflandance.py | Python | mit | 408 |
#Backend tests to be implemented
#Test 1 - Successful setup/running of server
#Test 2 - Successful retrieval of query
#Test 3 - Correct query return
#Test 4 - Successful upload of POST request
#Test 5 - Successful retrieval of newly inserted Rice
#Test 6 - Successful filtering of spam requests
import os
import datetime
import sys
import time
import string
import random
import pandas as pd
import numpy as np
import gc
# The script requires four positional arguments (training CSV,
# validation CSV, model output path, log path).  The original test
# (`< 2`) let invocations with fewer arguments pass and crash later
# with an IndexError on sys.argv[4].
if len(sys.argv) < 5:
    print('Usage: CSVTrainer.py train.csv validation.csv model.h5 log.txt')
    sys.exit(1)

trainingName = sys.argv[1]
validationName = sys.argv[2]
modelName = sys.argv[3]
logName = sys.argv[4]
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import *
import keras.preprocessing.image as image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.models import Model, load_model
from keras import regularizers
from keras import backend as K
from keras.utils.data_utils import get_file
from sklearn.metrics import accuracy_score
from keras.applications import resnet50
def readCSV(fileList):
    """Parse a training/validation CSV into file names and a label matrix.

    The CSV must have the image path in its first column and a
    space-separated tag string in a column named 'tags'.

    Returns a tuple (fileNames, y, labelMap) where fileNames is the list
    of image paths in row order, y is a (numImages, numLabels) float32
    multi-hot matrix, and labelMap maps each tag to its column in y.
    """
    namesDataFrame = pd.read_csv(fileList)
    # Collect the sorted set of all tags seen across every row.
    labels = sorted({tag for tags in namesDataFrame['tags'].values
                     for tag in tags.split(' ')})
    labelMap = {label: index for index, label in enumerate(labels)}
    numberOfImages = len(namesDataFrame)
    fileNames = []
    y = np.zeros((numberOfImages, len(labels)), np.float32)
    for index in range(numberOfImages):
        # NOTE: the original also loaded every image here just to discard
        # it, which made CSV parsing as slow as a full pass over the data
        # set; images are loaded later by the generators, so skip that.
        fileNames.append(namesDataFrame.iloc[index, 0])
        for tag in namesDataFrame.iloc[index, 1].split(' '):
            y[index, labelMap[tag]] = 1.0
    return (fileNames, y, labelMap)
print('Loading images..........', end = '',flush = True)
(trainingFileNames, trainY, trainingLabelMap) = readCSV(trainingName)
(validationFileNames, validationY, validationLabelMap) = readCSV(validationName)
print('done.', flush = True)
if len(trainingLabelMap) != len(validationLabelMap):
print("Label maps for training and validation are not equal")
sys.exit(1)
numberOfTrainingImages = len(trainingFileNames)
numberOfValidationImages = len(validationFileNames)
numberOfChannels = 3
nx = 256
ny = 256
batchSize = 25
lossName = 'binary_crossentropy'
activationName = 'sigmoid'
resnetModel = resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(numberOfChannels, nx, ny))
print('The number of layers in the resnet model = %d' % (len(resnetModel.layers)))
# Plain rescaling generators for extracting bottleneck features (no
# augmentation: the features are computed once, deterministically).
bottleneckTrainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckValidationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckTrainingGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(trainingFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
# BUG FIX: the validation flow was previously created from the
# *training* data generator object, leaving
# bottleneckValidationDataGenerator unused; both pipelines now use
# their own generator.
bottleneckValidationGenerator = bottleneckValidationDataGenerator.flow_from_filenames(validationFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckTrainingFeatures = resnetModel.predict_generator(bottleneckTrainingGenerator, numberOfTrainingImages)
bottleneckValidationFeatures = resnetModel.predict_generator(bottleneckValidationGenerator, numberOfValidationImages)
newTop = Sequential()
newTop.add(Flatten(input_shape = bottleneckTrainingFeatures.shape[1:]))
newTop.add(Dense(512, activation='relu'))
newTop.add(Dropout(0.5))
newTop.add(Dense(len(trainingLabelMap), activation=activationName, name='predictions'))
newTop.compile(loss=lossName, optimizer=Adam(lr=1.0E-3))
print('Fitting predicted features...', flush = True)
newTop.fit(bottleneckTrainingFeatures, trainY, validation_data = (bottleneckValidationFeatures, validationY), verbose = 1, batch_size = batchSize, nb_epoch = 25)
print('Done.', flush = True)
finalModel = Model(input = resnetModel.input, output = newTop(resnetModel.output))
print('The number of layers in the final model = %d' % (len(finalModel.layers)))
for layer in finalModel.layers[:(len(resnetModel.layers) - 21)]:
layer.trainable = False
finalModel.compile(loss=lossName,optimizer=SGD(lr=1e-4, momentum=0.9))
print(finalModel.summary())
# Could add vertical_flip = True
trainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0, rotation_range = 40, zoom_range = 0.15, horizontal_flip = True,
width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.1)
validationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
trainingGenerator = trainingDataGenerator.flow_from_filenames(trainingFileNames, trainY, batch_size = batchSize, target_size = (nx, ny))
validationGenerator = validationDataGenerator.flow_from_filenames(validationFileNames, validationY, batch_size = batchSize, target_size = (nx, ny))
csvLogger = CSVLogger(logName, append=True)
checkPointer = ModelCheckpoint(filepath=modelName, verbose = 1, save_best_only = True)
finalModel.fit_generator(trainingGenerator, numberOfTrainingImages, 50, validation_data = validationGenerator,
nb_val_samples = numberOfValidationImages, callbacks = [checkPointer, csvLogger])
| rboyes/KerasScripts | CSVTrainer.py | Python | apache-2.0 | 5,321 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
# Notice printed when the legacy ``python setup.py publish`` shortcut is
# used; publishing now goes through twine.  This is a runtime string
# that gets printed verbatim, so its content is left untouched.
_PUBLISH_WARNING = '''
******************
!!! DEPRECATED !!!
******************
Use twine to publish packages to pypi now.
Ensure you have the `wheel` and `twine` packages installed with
pip install wheel twine
Then create some distributions like
python setup.py sdist bdist_wheel
Then upload with twine
twine upload dist/*
'''
# Intercept the deprecated "publish" command before setuptools parses
# the command line, print migration instructions, and exit.
if sys.argv[-1] == 'publish':
    print(_PUBLISH_WARNING)
    sys.exit()
# All package metadata presumably lives in setup.cfg; only the test
# suite hook is configured here.
setup(test_suite='runtests.runtests')
| lpomfrey/django-taggit-machinetags | setup.py | Python | bsd-2-clause | 551 |
# c1219/access/telephone.py
#
# Copyright 2011 Spencer J. McIntyre <SMcIntyre [at] SecureState [dot] net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
# This library contains classes to facilitate retreiving complex C1219
# tables from a target device. Each parser expects to be passed a
# connection object. Right now the connection object is a
# c1218.connection.Connection instance, but anythin implementing the basic
# methods should work.
import struct
from c1219.constants import *
from c1219.errors import C1219ParseError, C1219ProcedureError
class C1219TelephoneAccess(object): # Corresponds To Decade 9x
    """
    This class provides generic access to the telephone/modem configuration
    tables that are stored in the decade 9x tables.
    """
    # Class-level defaults; overwritten per-instance in __init__ when the
    # corresponding table data is present.
    __global_bit_rate__ = None
    __originate_bit_rate__ = None
    __answer_bit_rate__ = None
    __prefix_number__ = ''
    __primary_phone_number_idx__ = None
    __secondary_phone_number_idx__ = None

    def __init__(self, conn):
        """
        Initializes a new instance of the class and reads tables from the
        corresponding decades to populate information.

        @type conn: c1218.connection.Connection
        @param conn: The driver to be used for interacting with the
        necessary tables.
        """
        self.conn = conn
        actual_telephone_table = self.conn.get_table_data(ACT_TELEPHONE_TBL)
        global_parameters_table = self.conn.get_table_data(GLOBAL_PARAMETERS_TBL)
        originate_parameters_table = self.conn.get_table_data(ORIGINATE_PARAMETERS_TBL)
        originate_schedule_table = self.conn.get_table_data(ORIGINATE_SCHEDULE_TBL)
        answer_parameters_table = self.conn.get_table_data(ANSWER_PARAMETERS_TBL)

        # BUG FIX: the original compared the table *string* itself against
        # 14 (always False on Python 2), so truncated tables were never
        # detected; the length must be checked.
        if len(actual_telephone_table) < 14:
            raise C1219ParseError('expected to read more data from ACT_TELEPHONE_TBL', ACT_TELEPHONE_TBL)

        ### Parse ACT_TELEPHONE_TBL ###
        use_extended_status = bool(ord(actual_telephone_table[0]) & 128)
        prefix_length = ord(actual_telephone_table[4])
        nbr_originate_numbers = ord(actual_telephone_table[5])
        phone_number_length = ord(actual_telephone_table[6])
        bit_rate_settings = (ord(actual_telephone_table[1]) >> 3) & 3 # not the actual settings but rather where they are defined
        self.__can_answer__ = bool(ord(actual_telephone_table[0]) & 1)
        self.__use_extended_status__ = use_extended_status
        self.__nbr_originate_numbers__ = nbr_originate_numbers

        ### Parse GLOBAL_PARAMETERS_TBL ###
        self.__psem_identity__ = ord(global_parameters_table[0])
        if bit_rate_settings == 1:
            # a single bit rate applies to both directions and lives in
            # the global parameters table
            if len(global_parameters_table) < 5:
                raise C1219ParseError('expected to read more data from GLOBAL_PARAMETERS_TBL', GLOBAL_PARAMETERS_TBL)
            self.__global_bit_rate__ = struct.unpack(conn.c1219_endian + 'I', global_parameters_table[1:5])[0]

        ### Parse ORIGINATE_PARAMETERS_TBL ###
        if bit_rate_settings == 2:
            # separate bit rates live in the originate/answer tables
            self.__originate_bit_rate__ = struct.unpack(conn.c1219_endian + 'I', originate_parameters_table[0:4])[0]
            originate_parameters_table = originate_parameters_table[4:]
        self.__dial_delay__ = ord(originate_parameters_table[0])
        originate_parameters_table = originate_parameters_table[1:]
        if prefix_length != 0:
            self.__prefix_number__ = originate_parameters_table[:prefix_length]
            originate_parameters_table = originate_parameters_table[prefix_length:]
        # Fixed-width phone number records follow the prefix.
        self.__originating_numbers__ = {}
        tmp = 0
        while tmp < self.__nbr_originate_numbers__:
            self.__originating_numbers__[tmp] = {'idx': tmp, 'number': originate_parameters_table[:phone_number_length], 'status': None}
            originate_parameters_table = originate_parameters_table[phone_number_length:]
            tmp += 1

        ### Parse ORIGINATE_SCHEDULE_TBL ###
        primary_phone_number_idx = ord(originate_schedule_table[0]) & 7
        secondary_phone_number_idx = (ord(originate_schedule_table[0]) >> 4) & 7
        # an index of 7 appears to mean "unused" -- values below it are kept
        if primary_phone_number_idx < 7:
            self.__primary_phone_number_idx__ = primary_phone_number_idx
        if secondary_phone_number_idx < 7:
            self.__secondary_phone_number_idx__ = secondary_phone_number_idx

        ### Parse ANSWER_PARAMETERS_TBL ###
        if bit_rate_settings == 2:
            self.__answer_bit_rate__ = struct.unpack(conn.c1219_endian + 'I', answer_parameters_table[0:4])[0]
        self.update_last_call_statuses()

    def initiate_call(self, number=None, idx=None):
        """
        Initiates a call to either the given phone number or the
        originating-number index.  When *number* is supplied it takes
        precedence and is resolved to an index first.
        """
        if number:
            idx = None
            for tmpidx in self.__originating_numbers__.keys():
                if self.__originating_numbers__[tmpidx]['number'] == number:
                    idx = tmpidx
            if idx is None:
                raise C1219ProcedureError('target phone number not found in originating numbers')
        if idx not in self.__originating_numbers__.keys():
            raise C1219ProcedureError('phone number index not within originating numbers range')
        return self.initiate_call_ex(self.conn, idx)

    @staticmethod
    def initiate_call_ex(conn, idx):
        # standard procedure 20 (initiate call) takes the originating
        # number index as a single-byte parameter
        return conn.run_procedure(20, False, chr(idx))

    def update_last_call_statuses(self):
        """
        Refreshes the cached per-number call status codes from
        CALL_STATUS_TBL.
        """
        call_status_table = self.conn.get_table_data(CALL_STATUS_TBL)
        if (len(call_status_table) % self.nbr_originate_numbers) != 0:
            raise C1219ParseError('expected to read more data from CALL_STATUS_TBL', CALL_STATUS_TBL)
        call_status_rcd_length = (len(call_status_table) / self.nbr_originate_numbers)
        tmp = 0
        while tmp < self.nbr_originate_numbers:
            # the first byte of each fixed-width record is the status code
            self.__originating_numbers__[tmp]['status'] = ord(call_status_table[0])
            call_status_table = call_status_table[call_status_rcd_length:]
            tmp += 1

    @property
    def answer_bit_rate(self):
        return self.__answer_bit_rate__

    @property
    def can_answer(self):
        return self.__can_answer__

    @property
    def dial_delay(self):
        return self.__dial_delay__

    @property
    def global_bit_rate(self):
        return self.__global_bit_rate__

    @property
    def nbr_originate_numbers(self):
        return self.__nbr_originate_numbers__

    @property
    def originate_bit_rate(self):
        return self.__originate_bit_rate__

    @property
    def originating_numbers(self):
        return self.__originating_numbers__

    @property
    def prefix_number(self):
        return self.__prefix_number__

    @property
    def primary_phone_number_idx(self):
        return self.__primary_phone_number_idx__

    @property
    def psem_identity(self):
        return self.__psem_identity__

    @property
    def secondary_phone_number_idx(self):
        return self.__secondary_phone_number_idx__

    @property
    def use_extended_status(self):
        return self.__use_extended_status__
| firebitsbr/termineter | c1219/access/telephone.py | Python | gpl-3.0 | 6,917 |
from base import *
import os
# Production (Heroku) settings: everything deployment-specific overrides
# the shared base settings imported above.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Recipients of error-notification emails.
ADMINS = (
    ('Audrey Roy', 'audreyr@cartwheelweb.com'),
    ('Daniel Greenfeld', 'pydanny@cartwheelweb.com'),
)
MANAGERS = ADMINS
# Outgoing mail goes through SendGrid; credentials come from the
# Heroku environment.
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL',
                                    'roda <info@roda.com>')
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_PASSWORD = os.environ.get('SENDGRID_PASSWORD')
EMAIL_HOST_USER = os.environ.get('SENDGRID_USERNAME')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
SERVER_EMAIL = 'info@roda.com'
EMAIL_USE_TLS = True
# Databases are configured from Heroku-style DATABASE_URL variables.
from postgresify import postgresify
DATABASES = postgresify()
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)
########## STORAGE CONFIGURATION
#INSTALLED_APPS += ('storages', 'raven.contrib.django', )
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)
# Both static and media files are served from S3.
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_QUERYSTRING_AUTH = False
# Far-future Expires plus one-day Cache-Control so browsers/CDNs cache
# aggressively.
AWS_HEADERS = {
    'Expires': 'Thu, 15 Apr 2020 20:00:00 GMT',
    'Cache-Control': 'max-age=86400',
}
# Boto requires subdomain formatting.
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
# Amazon S3 configuration.
# Fail fast at start-up when S3 credentials are missing from the
# environment, rather than erroring on the first upload.
if 'AWS_ACCESS_KEY_ID' in os.environ:
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
else:
    raise Exception("Missing AWS_ACCESS_KEY_ID")
if 'AWS_SECRET_ACCESS_KEY' in os.environ:
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
else:
    raise Exception("Missing AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = 'roda'
STATIC_URL = 'https://s3.amazonaws.com/roda/'
MEDIA_URL = STATIC_URL
########## END STORAGE CONFIGURATION
DJANGO_WYSIWYG_MEDIA_URL = os.path.join(STATIC_URL, 'ckeditor')
| pydanny/roda | roda/roda/settings/heroku.py | Python | mit | 1,814 |
from compare import expect
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import Client
from django.core.management import call_command
import sys
from tardis.tardis_portal.models import \
Experiment, Dataset, Dataset_File, ExperimentACL, License, UserProfile, \
ExperimentParameterSet, ExperimentParameter, DatasetParameterSet, DatafileParameterSet
def _create_test_user():
    """Create, save and return a test user (with an attached profile)."""
    user = User(username='tom',
                first_name='Thomas',
                last_name='Atkins',
                email='tommy@atkins.net')
    user.save()
    UserProfile(user=user).save()
    return user
def _create_license():
    """Create, save and return a CC BY-ND 2.5 AU license record."""
    cc_by_nd = License(
        name='Creative Commons Attribution-NoDerivs 2.5 Australia',
        url='http://creativecommons.org/licenses/by-nd/2.5/au/',
        internal_description='CC BY 2.5 AU',
        allows_distribution=True)
    cc_by_nd.save()
    return cc_by_nd
def _create_test_experiment(user, license_):
    """Create a fully public experiment owned by *user* under *license_*.

    Adds two ordered authors and an owner ACL granting full rights.
    """
    experiment = Experiment(title='Norwegian Blue',
                            description='Parrot + 40kV',
                            created_by=user)
    experiment.public_access = Experiment.PUBLIC_ACCESS_FULL
    experiment.license = license_
    experiment.save()
    # Two authors with persistent-identifier URLs, in display order.
    experiment.author_experiment_set.create(order=0,
                                            author="John Cleese",
                                            url="http://nla.gov.au/nla.party-1")
    experiment.author_experiment_set.create(order=1,
                                            author="Michael Palin",
                                            url="http://nla.gov.au/nla.party-2")
    # Grant the creating user full owner rights on the experiment.
    acl = ExperimentACL(experiment=experiment,
                        pluginId='django_user',
                        entityId=str(user.id),
                        isOwner=True,
                        canRead=True,
                        canWrite=True,
                        canDelete=True,
                        aclOwnershipType=ExperimentACL.OWNER_OWNED)
    acl.save()
    return experiment
def _create_test_dataset(nosDatafiles):
    """Create and return a dataset holding *nosDatafiles* datafiles."""
    dataset = Dataset(description='happy snaps of plumage')
    dataset.save()
    for _ in range(nosDatafiles):
        datafile = Dataset_File(dataset=dataset,
                                url='http://planet-python.org/' + str(_next_id()))
        datafile.save()
    dataset.save()
    return dataset
def _create_test_data():
    """Build two experiments that share one dataset.

    Returns a tuple (exp1, exp2): exp1 holds datasets 1 and 2, exp2 holds
    datasets 2 and 3, so dataset 2 is shared between the experiments.
    """
    user = _create_test_user()
    license_ = _create_license()
    exp1 = _create_test_experiment(user, license_)
    exp2 = _create_test_experiment(user, license_)
    ds1 = _create_test_dataset(1)
    ds2 = _create_test_dataset(2)
    ds3 = _create_test_dataset(3)
    ds1.experiments.add(exp1)
    ds2.experiments.add(exp1)
    ds2.experiments.add(exp2)
    ds3.experiments.add(exp2)
    for dataset in (ds1, ds2, ds3):
        dataset.save()
    exp1.save()
    exp2.save()
    return (exp1, exp2)
_counter = 1
def _next_id():
global _counter
res = _counter
_counter += 1
return res
class RmExperimentTestCase(TestCase):
    """Tests for the ``rmexperiment`` management command."""

    def setUp(self):
        pass

    def testList(self):
        # --list must only report what would be removed, deleting nothing.
        (exp1_, exp2_) = _create_test_data()
        expect(Dataset_File.objects.all().count()).to_be(6)
        expect(len(exp1_.get_datafiles())).to_be(3)
        expect(len(exp2_.get_datafiles())).to_be(5)
        # Check that --list doesn't remove anything
        call_command('rmexperiment', exp1_.pk, list=True)
        expect(Dataset_File.objects.all().count()).to_be(6)
        expect(len(exp1_.get_datafiles())).to_be(3)
        expect(len(exp2_.get_datafiles())).to_be(5)

    def testRemove(self):
        # Removing one experiment must preserve datasets shared with another.
        (exp1_, exp2_) = _create_test_data()
        expect(Dataset_File.objects.all().count()).to_be(6)
        expect(len(exp1_.get_datafiles())).to_be(3)
        expect(len(exp2_.get_datafiles())).to_be(5)
        # Remove first experiment and check that the shared dataset hasn't been removed
        call_command('rmexperiment', exp1_.pk, confirmed=True)
        expect(Dataset_File.objects.all().count()).to_be(5)
        expect(len(exp2_.get_datafiles())).to_be(5)
        #Remove second experiment
        call_command('rmexperiment', exp2_.pk, confirmed=True)
        expect(Dataset_File.objects.all().count()).to_be(0)
        #Check that everything else has been removed too
        expect(ExperimentACL.objects.all().count()).to_be(0)
        expect(ExperimentParameterSet.objects.all().count()).to_be(0)
        expect(ExperimentParameter.objects.all().count()).to_be(0)
        expect(DatasetParameterSet.objects.all().count()).to_be(0)
        expect(DatafileParameterSet.objects.all().count()).to_be(0)

    def tearDown(self):
        pass
| steveandroulakis/mytardis | tardis/tardis_portal/tests/test_rmexperiment.py | Python | bsd-3-clause | 4,827 |
#!/usr/bin/env python

from distutils.core import setup

setup(name='WebScrape',
      version='0.1.2',
      description='A tool for data mining, tailored for RateMyProfessors.com',
      author='Nick Rebhun',
      author_email='nrfactor@gmail.com',
      url='https://www.nrebhun.com',
      # BUG FIX: `packages` must list *local* package directories to
      # bundle, but this project ships no package named 'beautifulsoup4'
      # or 'sqlite3', so building a distribution failed.  beautifulsoup4
      # is a third-party requirement (declared as metadata here) and
      # sqlite3 ships with Python, needing no entry at all.
      requires=['beautifulsoup4'],
      )
| nrebhun/RMPScrape | setup.py | Python | mit | 341 |
from __future__ import unicode_literals
import ast
import os.path
import dj_database_url
import dj_email_url
from django.contrib.messages import constants as messages
import django_cache_url
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))
SITE_ID = 1
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
ROOT_URLCONF = 'saleor.urls'
WSGI_APPLICATION = 'saleor.wsgi.application'
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
INTERNAL_IPS = os.environ.get('INTERNAL_IPS', '127.0.0.1').split()
CACHES = {'default': django_cache_url.config()}
if os.environ.get('REDIS_URL'):
CACHES['default'] = {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': os.environ.get('REDIS_URL')}
DATABASES = {
'default': dj_database_url.config(
default='postgres://saleor:saleor@localhost:5432/saleor',
conn_max_age=600)}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
LOCALE_PATHS = [os.path.join(PROJECT_ROOT, 'locale')]
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_URL = os.environ.get('EMAIL_URL')
SENDGRID_USERNAME = os.environ.get('SENDGRID_USERNAME')
SENDGRID_PASSWORD = os.environ.get('SENDGRID_PASSWORD')
if not EMAIL_URL and SENDGRID_USERNAME and SENDGRID_PASSWORD:
EMAIL_URL = 'smtp://%s:%s@smtp.sendgrid.net:587/?tls=True' % (
SENDGRID_USERNAME, SENDGRID_PASSWORD)
email_config = dj_email_url.parse(EMAIL_URL or 'console://')
EMAIL_FILE_PATH = email_config['EMAIL_FILE_PATH']
EMAIL_HOST_USER = email_config['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = email_config['EMAIL_HOST_PASSWORD']
EMAIL_HOST = email_config['EMAIL_HOST']
EMAIL_PORT = email_config['EMAIL_PORT']
EMAIL_BACKEND = email_config['EMAIL_BACKEND']
EMAIL_USE_TLS = email_config['EMAIL_USE_TLS']
EMAIL_USE_SSL = email_config['EMAIL_USE_SSL']
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
ORDER_FROM_EMAIL = os.getenv('ORDER_FROM_EMAIL', DEFAULT_FROM_EMAIL)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
('assets', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'assets')),
('images', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'images')),
('dashboard', os.path.join(PROJECT_ROOT, 'saleor', 'static', 'dashboard'))
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
]
context_processors = [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'saleor.core.context_processors.default_currency',
'saleor.core.context_processors.categories',
'saleor.cart.context_processors.cart_counter',
'saleor.core.context_processors.search_enabled',
'saleor.site.context_processors.settings',
'saleor.core.context_processors.webpage_schema',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
]
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# TODO: this one is slow, but for now need for mptt?
'django.template.loaders.eggs.Loader']
if not DEBUG:
loaders = [('django.template.loaders.cached.Loader', loaders)]
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'OPTIONS': {
'debug': DEBUG,
'context_processors': context_processors,
'loaders': loaders,
'string_if_invalid': '<< MISSING VARIABLE "%s" >>' if DEBUG else ''}}]
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY')
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'saleor.core.middleware.DiscountMiddleware',
'saleor.core.middleware.GoogleAnalytics',
'saleor.core.middleware.CountryMiddleware',
'saleor.core.middleware.CurrencyMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
INSTALLED_APPS = [
# External apps that need to go before django's
'storages',
# Django modules
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.postgres',
# Local apps
'saleor.userprofile',
'saleor.discount',
'saleor.product',
'saleor.cart',
'saleor.checkout',
'saleor.core',
'saleor.graphql',
'saleor.order',
'saleor.dashboard',
'saleor.shipping',
'saleor.search',
'saleor.site',
'saleor.data_feeds',
# External apps
'versatileimagefield',
'babeldjango',
'bootstrap3',
'django_prices',
'django_prices_openexchangerates',
'emailit',
'graphene_django',
'mptt',
'payments',
'materializecssform',
'rest_framework',
'webpack_loader',
'social_django',
'django_countries',
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'saleor': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True
}
}
}
AUTH_USER_MODEL = 'userprofile.User'
LOGIN_URL = '/account/login/'
DEFAULT_COUNTRY = 'US'
DEFAULT_CURRENCY = 'USD'
AVAILABLE_CURRENCIES = [DEFAULT_CURRENCY]
OPENEXCHANGERATES_API_KEY = os.environ.get('OPENEXCHANGERATES_API_KEY')
ACCOUNT_ACTIVATION_DAYS = 3
LOGIN_REDIRECT_URL = 'home'
GOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('GOOGLE_ANALYTICS_TRACKING_ID')
def get_host():
    # Resolved lazily (django-payments calls this at payment time) so the
    # Site framework is queried per request, not at settings import.
    from saleor.site.utils import get_domain
    return get_domain()

# django-payments accepts a callable here; the current domain is looked
# up each time it is needed.
PAYMENT_HOST = get_host
PAYMENT_MODEL = 'order.Payment'
PAYMENT_VARIANTS = {
'default': ('payments.dummy.DummyProvider', {})}
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CHECKOUT_PAYMENT_CHOICES = [
('default', 'Dummy provider')]
MESSAGE_TAGS = {
messages.ERROR: 'danger'}
LOW_STOCK_THRESHOLD = 10
MAX_CART_LINE_QUANTITY = os.environ.get('MAX_CART_LINE_QUANTITY', 50)
PAGINATE_BY = 16
BOOTSTRAP3 = {
'set_placeholder': False,
'set_required': False,
'success_css_class': '',
'form_renderers': {
'default': 'saleor.core.utils.form_renderer.FormRenderer',
},
}
TEST_RUNNER = ''
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Amazon S3 configuration
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_MEDIA_BUCKET_NAME = os.environ.get('AWS_MEDIA_BUCKET_NAME')
AWS_QUERYSTRING_AUTH = ast.literal_eval(
os.environ.get('AWS_QUERYSTRING_AUTH', 'False'))
if AWS_STORAGE_BUCKET_NAME:
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
if AWS_MEDIA_BUCKET_NAME:
DEFAULT_FILE_STORAGE = 'saleor.core.storages.S3MediaStorage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
VERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {
'defaults': [
('product_gallery', 'crop__540x540'),
('product_gallery_2x', 'crop__1080x1080'),
('product_small', 'crop__60x60'),
('product_small_2x', 'crop__120x120'),
('product_list', 'crop__255x255'),
('product_list_2x', 'crop__510x510')]}
VERSATILEIMAGEFIELD_SETTINGS = {
# Images should be pre-generated on Production environment
'create_images_on_demand': ast.literal_eval(
os.environ.get('CREATE_IMAGES_ON_DEMAND', 'True')),
}
PLACEHOLDER_IMAGES = {
60: 'images/placeholder60x60.png',
120: 'images/placeholder120x120.png',
255: 'images/placeholder255x255.png',
540: 'images/placeholder540x540.png',
1080: 'images/placeholder1080x1080.png'
}
DEFAULT_PLACEHOLDER = 'images/placeholder255x255.png'
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'assets/',
'STATS_FILE': os.path.join(PROJECT_ROOT, 'webpack-bundle.json'),
'POLL_INTERVAL': 0.1,
'IGNORE': [
r'.+\.hot-update\.js',
r'.+\.map']}}
LOGOUT_ON_PASSWORD_CHANGE = False
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
SEARCHBOX_URL = os.environ.get('SEARCHBOX_URL')
BONSAI_URL = os.environ.get('BONSAI_URL')
# We'll support couple of elasticsearch add-ons, but finally we'll use single
# variable
ES_URL = ELASTICSEARCH_URL or SEARCHBOX_URL or BONSAI_URL or ''
if ES_URL:
SEARCH_BACKENDS = {
'default': {
'BACKEND': 'saleor.search.backends.elasticsearch2',
'URLS': [ES_URL],
'INDEX': os.environ.get('ELASTICSEARCH_INDEX_NAME', 'storefront'),
'TIMEOUT': 5,
'AUTO_UPDATE': True},
'dashboard': {
'BACKEND': 'saleor.search.backends.dashboard',
'URLS': [ES_URL],
'INDEX': os.environ.get('ELASTICSEARCH_INDEX_NAME', 'storefront'),
'TIMEOUT': 5,
'AUTO_UPDATE': False}
}
else:
SEARCH_BACKENDS = {}
GRAPHENE = {
'MIDDLEWARE': [
'graphene_django.debug.DjangoDebugMiddleware'
],
'SCHEMA': 'saleor.graphql.api.schema',
'SCHEMA_OUTPUT': os.path.join(
PROJECT_ROOT, 'saleor', 'static', 'schema.json')
}
SITE_SETTINGS_ID = 1
AUTHENTICATION_BACKENDS = [
'saleor.registration.backends.facebook.CustomFacebookOAuth2',
'saleor.registration.backends.google.CustomGoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
]
SOCIAL_AUTH_PIPELINE = [
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
]
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, email'}
| itbabu/saleor | saleor/settings.py | Python | bsd-3-clause | 12,155 |
from collections import defaultdict
import logging
import Pyro4
from pyage.core.agent.agent import AGENT
from pyage.core.inject import Inject
from pyage_forams.solutions.agent.shadow_cell import ShadowCell
from pyage_forams.solutions.distributed.request import Request
logger = logging.getLogger(__name__)
class NeighbourMatcher(object):
    """Locates remote neighbour agents through the Pyro4 name server and
    joins them with a local agent (the join strategy is subclass-specific)."""

    @Inject("request_dispatcher", "neighbours", 'ns_hostname')
    def __init__(self):
        super(NeighbourMatcher, self).__init__()
        # NOTE(review): defaultdict requires a zero-argument factory, but
        # _locate_agent takes an address, so accessing a missing key here
        # would raise TypeError.  The attribute is never read in this
        # module; kept unchanged for interface compatibility -- confirm
        # whether it can be removed.
        self._located_agents = defaultdict(self._locate_agent)

    def match_neighbours(self, agent):
        """Locate each configured neighbour and join it to *agent*."""
        for (side, address) in self.neighbours.iteritems():
            neighbour = self._locate_neighbour(address)
            if neighbour:
                self._join(neighbour, agent, side)

    def _locate_neighbour(self, address):
        """Return a Pyro4 proxy for the agent registered under *address*,
        or None when the lookup fails."""
        try:
            ns = Pyro4.locateNS(self.ns_hostname)
            agents = ns.list(AGENT + "." + address)
            return Pyro4.Proxy(agents.values().pop())
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so that SystemExit
            # and KeyboardInterrupt still propagate.
            logging.warning("could not locate %s" % address)

    def _join(self, neighbour, agent, side):
        # Strategy hook implemented by the 2d/3d subclasses.
        raise NotImplementedError()

    def _locate_agent(self, remote_address):
        """Return a Pyro4 proxy for the exact name *remote_address*."""
        ns = Pyro4.locateNS(self.ns_hostname)
        agent = Pyro4.Proxy(ns.lookup(remote_address))
        return agent
class Neighbour2dMatcher(NeighbourMatcher):
def _join(self, remote_agent, agent, side):
try:
remote_address = AGENT + "." + remote_agent.get_address()
logger.info("%s matching with: %s" % (side, remote_address))
shadows = remote_agent.get_shadows(opposite(side))
logger.debug("received shadows: %s" % shadows)
shadow_cells = self.create_shadow_cells(remote_address, shadows)
agent.join(remote_address, shadow_cells, side, remote_agent.get_steps())
self.request_dispatcher.submit_request(
Match2dRequest(remote_address, agent.get_shadows(side),
AGENT + "." + agent.get_address(), opposite(side), agent.get_steps()))
except Exception, e:
logger.exception("could not join: %s", e.message)
@staticmethod
def create_shadow_cells(remote_address, shadows):
shadow_cells = {address: ShadowCell(address, available_food, algae, empty, remote_address) for
(address, available_food, algae, empty, _) in shadows}
for (address, _, _, _, neighbours) in shadows:
for neighbour in neighbours:
try:
shadow_cells[address].add_neighbour(shadow_cells[neighbour])
except KeyError:
pass
return shadow_cells.values()
def update(self, remote_address, side, mapping):
logger.info("updating shadow cels from: %s" % remote_address)
agent = self._locate_agent(remote_address)
shadows = agent.get_shadows(opposite(side))
for shadow in shadows:
if shadow[0] in mapping:
mapping[shadow[0]].update(shadow)
else:
logger.info("unsuccessful attempt to update cell with address %s", shadow.get_address())
return agent.get_steps()
class Neighbour3dMatcher(NeighbourMatcher):
    """Matches a 3-d agent with a remote neighbour by exchanging shadow cells."""

    def _join(self, remote_agent, agent, side):
        """Join *agent* to *remote_agent* on *side* and schedule the mirror
        join on the remote node via the request dispatcher."""
        try:
            remote_address = AGENT + "." + remote_agent.get_address()
            logger.info("%s matching with: %s" % (side, remote_address))
            shadows = remote_agent.get_shadows(opposite(side))
            logger.debug("received shadows: %s" % shadows)
            shadow_cells = self.create_shadow_cells(remote_address, shadows)
            agent.join(remote_address, shadow_cells, side, remote_agent.get_steps())
            # NOTE(review): the 2-d matcher calls agent.get_shadows(side) here,
            # while this one goes through agent.environment -- confirm which
            # accessor is intended for the 3-d agent interface.
            self.request_dispatcher.submit_request(
                Match3dRequest(remote_address, agent.environment.get_shadows(side),
                               AGENT + "." + agent.get_address(), opposite(side), agent.get_steps()))
        except Exception as e:
            # "except Exception, e" and e.message are gone in Python 3;
            # "as" is valid from Python 2.6 onwards.
            logger.exception("could not join: %s", e)

    @staticmethod
    def create_shadow_cells(remote_address, shadows):
        """Rebuild a 2-d grid (list of rows) of ShadowCell objects from
        serialized shadow tuples and wire up their mutual neighbourhood.

        Each tuple is (address, available_food, algae, empty, neighbours).
        """
        shadow_cells = [[ShadowCell(address, available_food, algae, empty, remote_address) for
                         (address, available_food, algae, empty, _) in row] for row in shadows]
        # Flat address -> cell index used to resolve neighbour references.
        mapping = {shadow_cell.get_address(): shadow_cell for row in shadow_cells for shadow_cell in row}
        for row in shadows:
            for (address, _, _, _, neighbours) in row:
                for neighbour in neighbours:
                    try:
                        mapping[address].add_neighbour(mapping[neighbour])
                    except KeyError:
                        # Neighbour lies outside the transferred slice -- skip it.
                        pass
        return shadow_cells

    def update(self, remote_address, side, mapping):
        """Refresh local shadow cells from the remote agent's current shadows.

        :param mapping: {address: local shadow cell} to be updated in place
        :return: the remote agent's step counter
        """
        logger.info("updating shadow cels from: %s" % remote_address)
        agent = self._locate_agent(remote_address)
        shadows = agent.get_shadows(opposite(side))
        for row in shadows:
            for shadow in row:
                if shadow[0] in mapping:
                    mapping[shadow[0]].update(shadow)
                else:
                    # BUGFIX: shadow is a tuple (address first), not a cell
                    # object; shadow.get_address() raised AttributeError here.
                    logger.info("unsuccessful attempt to update cell with address %s", shadow[0])
        return agent.get_steps()
class Match2dRequest(Request):
    """Request carrying shadow cells that asks a 2-d agent to join with the
    remote agent that produced them."""

    def __init__(self, agent_address, cells, remote_address, side, steps):
        super(Match2dRequest, self).__init__(agent_address)
        self.remote_address = remote_address
        self.shadows = cells
        self.steps = steps
        self.side = side

    def execute(self, agent):
        """Materialize the shipped shadows and perform the join on *agent*."""
        logger.info("2d-matching with %s" % self.remote_address)
        cells = Neighbour2dMatcher.create_shadow_cells(self.remote_address, self.shadows)
        agent.join(self.remote_address, cells, self.side, self.steps)
class Match3dRequest(Request):
    """Request carrying shadow cells that asks a 3-d agent to join with the
    remote agent that produced them."""

    def __init__(self, agent_address, shadows, remote_address, side, steps):
        super(Match3dRequest, self).__init__(agent_address)
        self.remote_address = remote_address
        self.shadows = shadows
        self.steps = steps
        self.side = side

    def execute(self, agent):
        """Materialize the shipped shadows and perform the join on *agent*."""
        logger.info("3d-matching with %s" % self.remote_address)
        cells = Neighbour3dMatcher.create_shadow_cells(self.remote_address, self.shadows)
        agent.join(self.remote_address, cells, self.side, self.steps)
def opposite(side):
    """Return the side facing *side*, e.g. "left" -> "right".

    :raises ValueError: when *side* is not one of the six known side names
    """
    facing = {
        "left": "right",
        "right": "left",
        "upper": "lower",
        "lower": "upper",
        "front": "back",
        "back": "front",
    }
    try:
        return facing[side]
    except KeyError:
        raise ValueError("unrecognized side: " + side)
| maciek123/pyage-forams | pyage_forams/solutions/distributed/neighbour_matcher.py | Python | gpl-2.0 | 6,823 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.